Index: projects/clang400-import/etc/devd/usb.conf
===================================================================
--- projects/clang400-import/etc/devd/usb.conf (revision 312719)
+++ projects/clang400-import/etc/devd/usb.conf (revision 312720)
@@ -1,5893 +1,5893 @@
#
# $FreeBSD$
#
# This file was automatically generated by "tools/tools/bus_autoconf/bus_autoconf.sh".
# Please do not edit!
#
nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0104"; match "product" "0x00be"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0123"; match "product" "0x0001"; action "kldload -n uep"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03e8"; match "product" "0x0008"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03eb"; match "product" "0x2109"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "0x0121"; action "kldload -n ugensa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "(0x1016|0x1116|0x1216)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "(0x1b1d|0x1e1d)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "(0x2016|0x2116|0x2216)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "(0x241d|0x251d)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "(0x3016|0x3116)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "0x311d"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "0x3216"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "0x3524"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "(0x4016|0x4116|0x4216|0x5016|0x5116|0x5216)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "0x811c"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x03f0"; match "product" "0xca02"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0402"; match "product" "0x5632"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0403"; match "product"
"(0x5259|0x6001|0x6004|0x6006|0x6006|0x6010|0x6011|0x6014|0x6015|0x8372|0x9378|0x9379|0x937a|0x937c|0x9868|0x9e90|0x9f80|0xa6d0|0xa6d1|0xabb8|0xb810|0xb811|0xb812|0xbaf8|0xbbe2|0xbca0|0xbca1|0xbca2|0xbca4|0xbcd8|0xbcd9|0xbcda|0xbdc8|0xbfd8|0xbfd9|0xbfda|0xbfdb|0xbfdc|0xc7d0|0xc850|0xc991|0xcaa0|0xcc48|0xcc49|0xcc4a|0xd010|0xd011|0xd012|0xd013|0xd014|0xd015|0xd016|0xd017|0xd070|0xd071|0xd388|0xd389|0xd38a|0xd38b|0xd38c|0xd38d|0xd38e|0xd38f|0xd578|0xd678|0xd738|0xd780|0xdaf8|0xdaf9|0xdafa|0xdafb|0xdafc|0xdafd|0xdafe|0xdaff|0xdc00|0xdc01|0xdd20|0xdf28|0xdf30|0xdf31|0xdf32|0xdf33|0xdf35|0xe000|0xe001|0xe002|0xe004|0xe006|0xe008|0xe009|0xe00a|0xe050|0xe0e8|0xe0e9|0xe0ea|0xe0eb|0xe0ec|0xe0ed|0xe0ee|0xe0ef|0xe0f0|0xe0f1|0xe0f2|0xe0f3|0xe0f4|0xe0f5|0xe0f6|0xe0f7|0xe40b|0xe520|0xe548|0xe6c8|0xe700|0xe729|0xe808|0xe809|0xe80a|0xe80b|0xe80c|0xe80d|0xe80e|0xe80f|0xe888|0xe889|0xe88a|0xe88b|0xe88c|0xe88d|0xe88e|0xe88f|0xea90|0xebe0|0xec88|0xec89|0xed22|0xed71|0xed72|0xed73|0xed74|0xee18|0xeee8|0xeee9|0xeeea|0xeeeb|0xeeec|0xeeed|0xeeee|0xeeef|0xef50|0xef51|0xf068|0xf069|0xf06a|0xf06b|0xf06c|0xf06d|0xf06e|0xf06f|0xf070|0xf0c0|0xf0c8|0xf208|0xf2d0|0xf3c0|0xf3c1|0xf3c2|0xf448|0xf449|0xf44a|0xf44b|0xf44c|0xf460|0xf608|0xf60b|0xf680|0xf850|0xf857|0xf9d0|0xf9d1|0xf9d2|0xf9d3|0xf9d4|0xf9d5|0xfa00|0xfa01|0xfa02|0xfa03|0xfa04|0xfa05|0xfa06|0xfa10|0xfa33|0xfa88|0xfad0|0xfaf0|0xfb58|0xfb59|0xfb5a|0xfb5b|0xfb5c|0xfb5d|0xfb5e|0xfb5f|0xfb80|0xfb99|0xfbfa|0xfc08|0xfc09|0xfc0a|0xfc0b|0xfc0c|0xfc0d|0xfc0e|0xfc0f|0xfc60|0xfc70|0xfc71|0xfc72|0xfc73|0xfc82|0xfd60|0xfe38|0xff00|0xff18|0xff1c|0xff1d|0xff20|0xff38|0xff39|0xff3a|0xff3b|0xff3c|0xff3d|0xff3e|0xff3f|0xffa8)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0408"; match "product" "0x0304"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0408"; match "product" "(0x1000|0xea02|0xea03|0xea04|0xea05|0xea06)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0409"; match "product" "(0x00d5|0x00d6|0x00d7)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0409"; match "product" "0x0249"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0409"; match "product" "0x0408"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0409"; match "product" "(0x8024|0x8025)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "(0x0001|0x0005|0x0009)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x0012"; action "kldload -n if_rue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x003d"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "(0x005e|0x0066|0x0067)"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x006e"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x008b"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; 
match "vendor" "0x0411"; match "product" "0x00b3"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "(0x00d8|0x00d9)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x00da"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x00e8"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "(0x00f4|0x0116|0x0119)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x012e"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "0x0137"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "(0x0148|0x0150|0x015d|0x016f|0x01a2|0x01a8|0x01ee)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0411"; match "product" "(0x0242|0x025d)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0413"; match "product" "0x2101"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0423"; match "product" "(0x000a|0x000c)"; action "kldload -n if_cue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x043e"; match "product" "0x9c01"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x044e"; match "product" "(0x3001|0x3002)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0456"; match "product" "(0xf000|0xf001)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x045a"; match "product" "(0x5001|0x5002)"; action "kldload -n urio"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x045b"; match "product" "0x0053"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x045e"; match "product" "0x0079"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x045e"; match "product" "0x007a"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x045e"; match "product" 
"(0x00ce|0x0400|0x0401|0x0402|0x0403|0x0404|0x0405|0x0406|0x0407|0x0408|0x0409|0x040a|0x040b|0x040c|0x040d|0x040e|0x040f|0x0410|0x0411|0x0412|0x0413|0x0414|0x0415|0x0416|0x0417|0x0432|0x0433|0x0434|0x0435|0x0436|0x0437|0x0438|0x0439|0x043a|0x043b|0x043c|0x043d|0x043e|0x043f|0x0440|0x0441|0x0442|0x0443|0x0444|0x0445|0x0446|0x0447|0x0448|0x0449|0x044a|0x044b|0x044c|0x044d|0x044e|0x044f|0x0450|0x0451|0x0452|0x0453|0x0454|0x0455|0x0456|0x0457|0x0458|0x0459|0x045a|0x045b|0x045c|0x045d|0x045e|0x045f|0x0460|0x0461|0x0462|0x0463|0x0464|0x0465|0x0466|0x0467|0x0468|0x0469|0x046a|0x046b|0x046c|0x046d|0x046e|0x046f|0x0470|0x0471|0x0472|0x0473|0x0474|0x0475|0x0476|0x0477|0x0478|0x0479|0x047a|0x047b|0x04c8|0x04c9|0x04ca|0x04cb|0x04cc|0x04cd|0x04ce|0x04d7|0x04d8|0x04d9|0x04da|0x04db|0x04dc|0x04dd|0x04de|0x04df|0x04e0|0x04e1|0x04e2|0x04e3|0x04e4|0x04e5|0x04e6|0x04e7|0x04e8|0x04e9|0x04ea)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0471"; match "product" "0x066a"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0471"; match "product" "0x1236"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0471"; match "product" "0x200f"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0482"; match "product" "0x0203"; action "kldload -n umodem"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0489"; match "product" "(0xe000|0xe003)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0489"; match "product" "(0xe027|0xe02c|0xe036|0xe03c|0xe03d|0xe042|0xe04e|0xe056|0xe057)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0499"; match "product" "(0x1000|0x1001|0x1002|0x1003|0x1004|0x1005|0x1006|0x1007|0x1008|0x1009|0x100a|0x100c|0x100d|0x100e|0x100f|0x1010|0x1011|0x1012|0x1013|0x1014|0x1015|0x1016|0x1017|0x1018|0x1019|0x101a|0x101b|0x101c|0x101d|0x101e|0x101f|0x1020|0x1021|0x1022|0x1023|0x1024|0x1025|0x1026|0x1027|0x1028|0x1029|0x102a|0x102b|0x102e|0x1030|0x1031|0x1032|0x1033|0x1034|0x1035|0x1036|0x1037|0x1038|0x1039|0x103a|0x103b|0x103c|0x103d|0x103e|0x103f|0x1040|0x1041|0x1042|0x1043|0x1044|0x1045|0x104e|0x104f|0x1050|0x1051|0x1052|0x1053|0x1054|0x1055|0x1056|0x1057|0x1058|0x1059|0x105a|0x105b|0x105c|0x105d|0x1503|0x2000|0x2001|0x2002|0x2003|0x5000|0x5001|0x5002|0x5003|0x5004|0x5005|0x5006|0x5007|0x5008|0x5009|0x500a|0x500b|0x500c|0x500d|0x500e|0x500f|0x7000|0x7010)"; action "kldload -n snd_uaudio"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x049f"; match "product" "(0x0003|0x0032)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x049f"; match "product" "0x505a"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04a4"; match "product" "0x0014"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04a5"; match "product" "0x4027"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04a5"; match "product" "0x4068"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04ad"; match "product" 
"(0x0301|0x0302|0x0303|0x0306)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04b4"; match "product" "0x1002"; action "kldload -n ufm"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04b7"; match "product" "0x0531"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04b8"; match "product" "(0x0521|0x0522)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bb"; match "product" "0x0901"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bb"; match "product" "(0x0904|0x0913)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bb"; match "product" "0x0930"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bb"; match "product" "0x093f"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bb"; match "product" "(0x0944|0x0945|0x0947|0x0948)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bb"; match "product" "0x0952"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bb"; match "product" "(0x0a03|0x0a0e)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bf"; match "product" "(0x0115|0x0117)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04bf"; match "product" "0x030a"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04c5"; match "product" "(0x1058|0x1079)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04ca"; match "product" "(0x2003|0x3005|0x3006|0x3008)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04da"; match "product" "0x2500"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04da"; match "product" "0x3900"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04dd"; match "product" "(0x8004|0x8005|0x8006|0x8007|0x9031)"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04dd"; match "product" "(0x9102|0x9121|0x9123|0x9151|0x91ac|0x9242)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04e8"; match "product" "0x2018"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04e8"; match "product" "(0x5f00|0x5f01|0x5f02|0x5f03|0x5f04)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04e8"; match "product" "0x6601"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04e8"; match "product" "(0x6611|0x6613|0x6615|0x6617|0x6619|0x661b|0x662e|0x6630|0x6632)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04e8"; match "product" "0x8001"; action 
"kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04f1"; match "product" "0x3008"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04f1"; match "product" "(0x3011|0x3012)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x04f2"; match "product" "(0xaff7|0xaff8|0xaff9|0xaffa|0xaffa)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0502"; match "product" "(0x1631|0x1632|0x16e1|0x16e2|0x16e3)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0506"; match "product" "(0x03e8|0x11f8)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0506"; match "product" "0x4601"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x0103"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x0109"; action "kldload -n umct"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x0121"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x0257"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x0409"; action "kldload -n umct"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x1102"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x1103"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x1203"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "(0x2102|0x2103)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x4050"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x5055"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x7050"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "(0x7050|0x7051)"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x705a"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x705c"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x705e"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "(0x8053|0x805c|0x815c)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x815f"; action "kldload -n if_rsu"; }; 
nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "(0x825a|0x825b)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x845a"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x905b"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "(0x935a|0x935b)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x050d"; match "product" "0x945a"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0525"; match "product" "(0x1080|0xa4a0)"; action "kldload -n udbp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0525"; match "product" "0xa4a2"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0536"; match "product" "0x01a0"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0543"; match "product" "(0x0ed9|0x1527|0x1529|0x152b|0x152e|0x1921|0x1922|0x1923)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0547"; match "product" "0x2008"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0547"; match "product" "0x2720"; action "kldload -n udbp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x054c"; match "product" "(0x0038|0x0066|0x0095|0x009a|0x00da|0x0169)"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x054c"; match "product" "0x0437"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0557"; match "product" "0x2002"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0557"; match "product" "0x2007"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0557"; match "product" "0x2008"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0557"; match "product" "0x2009"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0557"; match "product" "0x4000"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x055d"; match "product" "0x2018"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0565"; match "product" "0x0001"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0565"; match "product" "(0x0002|0x0003|0x0005)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0567"; match "product" "(0x2000|0x2002)"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x056c"; match "product" "0x8007"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x056e"; match "product" "(0x200c|0x4002|0x4005)"; action "kldload -n if_aue"; 
}; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x056e"; match "product" "0x4008"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x056e"; match "product" "(0x400b|0x4010)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x056e"; match "product" "(0x5003|0x5004)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x056e"; match "product" "0xabc1"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x057c"; match "product" "(0x2200|0x3800)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x057c"; match "product" "0x8401"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0584"; match "product" "0xb000"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0584"; match "product" "0xb020"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0586"; match "product" "(0x3401|0x3407|0x3409|0x340a|0x340f|0x3410)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0586"; match "product" "0x3416"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0586"; match "product" "0x3417"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0586"; match "product" "(0x341a|0x341e)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0586"; match "product" "0x341f"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0586"; match "product" "0x3421"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0586"; match "product" "0x3426"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x058f"; match "product" "0x9720"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05a6"; match "product" "0x0101"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "product" "(0x020d|0x020e|0x020f|0x0210|0x0214|0x0215|0x0216|0x0217|0x0218|0x0219|0x021a|0x021b|0x021c)"; action "kldload -n atp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "product" "(0x0223|0x0224|0x0225)"; action "kldload -n wsp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "product" "(0x0229|0x022a|0x022b)"; action "kldload -n atp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "product" "(0x0230|0x0231|0x0232|0x0236|0x0237|0x0238|0x023f|0x0240|0x0241|0x0242|0x0243|0x0244|0x0245|0x0246|0x0247|0x0249|0x024a|0x024b|0x024c|0x024d|0x024e|0x0252|0x0253|0x0254|0x0259|0x025a|0x025b|0x0262|0x0263|0x0264|0x0272|0x0273|0x0274|0x0290|0x0291|0x0292)"; action "kldload -n wsp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "product" "(0x030a|0x030b)"; action 
"kldload -n atp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "product" "0x1402"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "product" "(0x8213|0x8215|0x8218|0x821a|0x821b|0x821f|0x8281|0x828f)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ad"; match "product" "0x0fba"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05c6"; match "product" "(0x1000|0x6000|0x6500|0x6613|0x9000|0x9002|0x9204|0x9205)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05cc"; match "product" "0x3000"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05db"; match "product" "(0x0003|0x0005|0x0009|0x000a|0x0011)"; action "kldload -n uvscom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05e0"; match "product" "(0x2000|0x2001|0x2002|0x2003|0x2004|0x2005|0x2006|0x2007|0x2008|0x2009|0x200a)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05e3"; match "product" "0x0501"; action "kldload -n udbp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05e9"; match "product" "(0x0008|0x0009)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0647"; match "product" "0x0100"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x065a"; match "product" "(0xc000|0xc001)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x066b"; match "product" "(0x200c|0x2202)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x066b"; match "product" "0x2202"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x066b"; match "product" "(0x2203|0x2204|0x2206|0x400b)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0675"; match "product" "0x0550"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x067b"; match "product" "(0x0000|0x0001)"; action "kldload -n udbp"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x067b"; match "product" "(0x0307|0x04bb|0x0609|0x0611|0x0612|0x1234|0x206a|0x2303)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x067b"; match "product" "0x2501"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x067b"; match "product" "(0x331a|0xaaa0|0xaaa2)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x067c"; match "product" "0x1001"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x067e"; match "product" "0x1001"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0681"; match "product" "0x3c06"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x06ce"; 
match "product" "0x8311"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x06e1"; match "product" "(0x0008|0x0009)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x06f8"; match "product" "0xe000"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x06f8"; match "product" "(0xe010|0xe020)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x06f8"; match "product" "0xe030"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x06f8"; match "product" "(0xe031|0xe032)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x06f8"; match "product" "0xe033"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0707"; match "product" "0x0100"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0707"; match "product" "(0x0200|0x0201)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0707"; match "product" "0xee13"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0707"; match "product" "0xee13"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0711"; match "product" "(0x0200|0x0210|0x0230)"; action "kldload -n umct"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0731"; match "product" "(0x0528|0x2003)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0745"; match "product" "0x0001"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0745"; match "product" "0x1000"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0769"; match "product" "0x11f2"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0769"; match "product" "0x11f3"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0769"; match "product" "0x31f3"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x077b"; match "product" "0x2226"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0789"; match "product" "0x010c"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0789"; match "product" "0x0160"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0789"; match "product" "(0x0162|0x0163|0x0164|0x0166|0x0168|0x0169)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x078b"; match "product" "0x1234"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x079b"; match "product" "0x0027"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x079b"; match "product" "(0x004a|0x0062)"; 
action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07a6"; match "product" "(0x07c2|0x0986|0x8511|0x8513|0x8515)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "0x0001"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "(0x0004|0x000d)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "0x0017"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "0x002a"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "(0x002d|0x002e)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "(0x002f|0x003c|0x003f|0x0041|0x0042)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "0x0047"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "0x0056"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07aa"; match "product" "0x9601"; action "kldload -n if_udav"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "(0x110c|0x200c)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "(0x2770|0x2870|0x3070|0x3071|0x3072)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "0x4000"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "(0x4002|0x4003|0x4004|0x4007|0x400b|0x400c|0x4102|0x4104)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "0x420a"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "0x6001"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "(0x8178|0x8179|0x8188|0x8189)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "0xabc1"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07b8"; match "product" "(0xb21b|0xb21c|0xb21d|0xb21e|0xb21f)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07c9"; match "product" "0xb100"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07cf"; match "product" "(0x2001|0x2002|0x2003)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "(0x3300|0x3302|0x3303)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "0x3a09"; action "kldload -n 
if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "0x3a0c"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "0x3a0f"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "(0x3c03|0x3c04|0x3c06|0x3c07)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "(0x3c09|0x3c0a|0x3c0b|0x3c0d|0x3c0e|0x3c0f)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "0x3c10"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x07d1"; match "product" "(0x3c11|0x3c13|0x3c15|0x3c16)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x081e"; match "product" "0xdf00"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x082d"; match "product" "(0x0100|0x0200|0x0300)"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0830"; match "product" "(0x0001|0x0002|0x0003|0x0020|0x0031|0x0040|0x0050|0x0060|0x0061|0x0070)"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0833"; match "product" "(0x012e|0x039f)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "0x1046"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "(0x4505|0x4506)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "0x4508"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "0x4521"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "0x5046"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "(0x6618|0x7511|0x7512|0x7522|0x8522|0xa512|0xa618|0xa701|0xa702|0xb522)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "0xc512"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "(0xc522|0xd522)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "0xe501"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x083a"; match "product" "0xf522"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0841"; match "product" "0x0001"; action "kldload -n urio"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "(0x1001|0x1002)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x1020"; action "kldload -n if_aue"; }; 
nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x1040"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x1100"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x4240"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x4260"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x4300"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "(0x6100|0x6a00)"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "(0x9001|0x9010)"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x9012"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x9021"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "0x9040"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0846"; match "product" "(0x9041|0x9052)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0856"; match "product" "(0xac01|0xac02|0xac03|0xac11|0xac12|0xac16|0xac17|0xac18|0xac19|0xac25|0xac26|0xac27|0xac33|0xac34|0xac49|0xac50|0xba02)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x085a"; match "product" "(0x0008|0x0009)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x086e"; match "product" "0x1920"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x087d"; match "product" "0x5704"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x08d1"; match "product" "0x0001"; action "kldload -n if_cue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x08d1"; match "product" "0x0003"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x08dd"; match "product" "(0x0986|0x0987|0x0988|0x8511)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x08dd"; match "product" "0x90ff"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x08e6"; match "product" "0x5501"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x08fd"; match "product" "0x000a"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0915"; match "product" "(0x2000|0x2002)"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x091e"; match "product" "0x0004"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0921"; match 
"product" "0x1001"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0930"; match "product" "(0x0215|0x0219)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0930"; match "product" "(0x0700|0x0705|0x0706|0x0707|0x0708|0x0709|0x070a|0x070b)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0930"; match "product" "0x0a07"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0930"; match "product" "(0x0d45|0x1302)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x093c"; match "product" "(0x0601|0x0701)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x094b"; match "product" "0x0001"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0951"; match "product" "0x0008"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0951"; match "product" "0x000a"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x095a"; match "product" "0x3003"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0960"; match "product" "(0x0065|0x0066|0x0067)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0961"; match "product" "0x0010"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x099e"; match "product" "(0x0052|0x4000)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x09aa"; match "product" "0x1000"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x09d7"; match "product" "0x0100"; action "kldload -n ugensa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0a46"; match "product" "(0x0268|0x8515|0x9601)"; action "kldload -n if_udav"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0a5c"; match "product" "0x2033"; action "kldload -n ubtbcmfw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0a5c"; match "product" "0x21e1"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0acd"; match "product" "0x0300"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0ace"; match "product" "(0x1211|0x1215)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0ace"; match "product" "0x1221"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "(0x5000|0x6000|0x6050|0x6100|0x6150|0x6200|0x6250|0x6300|0x6350|0x6500|0x6501|0x6600|0x6601|0x6701)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0x6711"; action "kldload -n uhso"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "(0x6721|0x6741|0x6761|0x6800|0x6901)"; action "kldload -n u3g"; }; 
nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0x6911"; action "kldload -n uhso"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0x6971"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0x6971"; action "kldload -n uhso"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0x7001"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0x7011"; action "kldload -n uhso"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "(0x7021|0x7041|0x7061|0x7100|0x7201|0x7211)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "(0x7251|0x7301|0x7361|0x7381|0x7401|0x7501)"; action "kldload -n uhso"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0x7601"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "(0x7601|0x9000|0xc031|0xd013|0xd031)"; action "kldload -n uhso"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "0xd033"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0af0"; match "product" "(0xd033|0xd055|0xd055)"; action "kldload -n uhso"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "(0x1706|0x1707)"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "(0x170c|0x171b)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x171d"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "(0x1723|0x1724)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "(0x1731|0x1732|0x1742|0x1760|0x1761|0x1784)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x1786"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x1790"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x1791"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x179d"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x17ab"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x17ad"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x17b5"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x17ba"; action 
"kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x17cb"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "0x17d2"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b05"; match "product" "(0x4200|0x4201|0x4202|0x420f|0x9200|0x9202)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b39"; match "product" "0x0103"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b39"; match "product" "0x0109"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b39"; match "product" "0x0421"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b3b"; match "product" "(0x1630|0x5630|0x6630)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b41"; match "product" "0x0011"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b63"; match "product" "0x6530"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b8c"; match "product" "0x2303"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b95"; match "product" "(0x1720|0x1780)"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b95"; match "product" "(0x178a|0x1790)"; action "kldload -n if_axge"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0b95"; match "product" "(0x7720|0x772a|0x772b|0x7e2b)"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0baf"; match "product" "0x0118"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0baf"; match "product" "0x0121"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bb2"; match "product" "0x6098"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bb4"; match "product" 
"(0x00ce|0x00cf|0x00cf|0x0a01|0x0a02|0x0a03|0x0a04|0x0a05|0x0a06|0x0a07|0x0a08|0x0a09|0x0a0a|0x0a0b|0x0a0c|0x0a0d|0x0a0e|0x0a0f|0x0a10|0x0a11|0x0a12|0x0a13|0x0a14|0x0a15|0x0a16|0x0a17|0x0a18|0x0a19|0x0a1a|0x0a1b|0x0a1c|0x0a1d|0x0a1e|0x0a1f|0x0a20|0x0a21|0x0a22|0x0a23|0x0a24|0x0a25|0x0a26|0x0a27|0x0a28|0x0a29|0x0a2a|0x0a2b|0x0a2c|0x0a2d|0x0a2e|0x0a2f|0x0a30|0x0a31|0x0a32|0x0a33|0x0a34|0x0a35|0x0a36|0x0a37|0x0a38|0x0a39|0x0a3a|0x0a3b|0x0a3c|0x0a3d|0x0a3e|0x0a3f|0x0a40|0x0a41|0x0a42|0x0a43|0x0a44|0x0a45|0x0a46|0x0a47|0x0a48|0x0a49|0x0a4a|0x0a4b|0x0a4c|0x0a4d|0x0a4e|0x0a4f|0x0a50|0x0a51|0x0a52|0x0a53|0x0a54|0x0a55|0x0a56|0x0a57|0x0a58|0x0a59|0x0a5a|0x0a5b|0x0a5c|0x0a5d|0x0a5e|0x0a5f|0x0a60|0x0a61|0x0a62|0x0a63|0x0a64|0x0a65|0x0a66|0x0a67|0x0a68|0x0a69|0x0a6a|0x0a6b|0x0a6c|0x0a6d|0x0a6e|0x0a6f|0x0a70|0x0a71|0x0a72|0x0a73|0x0a74|0x0a75|0x0a76|0x0a77|0x0a78|0x0a79|0x0a7a|0x0a7b|0x0a7c|0x0a7d|0x0a7e|0x0a7f|0x0a80|0x0a81|0x0a82|0x0a83|0x0a84|0x0a85|0x0a86|0x0a87|0x0a88|0x0a89|0x0a8a|0x0a8b|0x0a8c|0x0a8d|0x0a8e|0x0a8f|0x0a90|0x0a91|0x0a92|0x0a93|0x0a94|0x0a95|0x0a96|0x0a97|0x0a98|0x0a99|0x0a9a|0x0a9b|0x0a9c|0x0a9d|0x0a9e|0x0a9f|0x0bce)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "(0x0179|0x018a|0x317f)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "0x8150"; action "kldload -n if_rue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "(0x8152|0x8153)"; action "kldload -n if_ure"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "0x8170"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "(0x8171|0x8172|0x8173|0x8174)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "(0x8176|0x8177|0x8178|0x8179|0x817a|0x817b|0x817c|0x817d|0x817e|0x817f)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "(0x8187|0x8189)"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; - match "product" "(0x818a|0x8191)"; + match "product" "(0x818a|0x818b|0x8191)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "(0x8197|0x8198)"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "(0x8712|0x8712)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "0x8754"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bda"; match "product" "0xc512"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bdb"; match "product" "0x1002"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bed"; match "product" "(0x1100|0x1101)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bf8"; match "product" "0x1001"; action "kldload -n uipaq"; }; nomatch 32 { match 
"bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0bf8"; match "product" "0x1009"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c10"; match "product" "0x0000"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c26"; match "product" "(0x0004|0x0009|0x000a|0x000b|0x000c|0x000d|0x0010|0x0011|0x0012|0x0013|0x0018)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c33"; match "product" "0x0010"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c44"; match "product" "0x03a2"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c45"; match "product" "0x7401"; action "kldload -n ugold"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c52"; match "product" "(0x2101|0x2102|0x2103|0x2104|0x2211|0x2212|0x2213|0x2221|0x2222|0x2223|0x2411|0x2412|0x2413|0x2421|0x2422|0x2423|0x2431|0x2432|0x2433|0x2441|0x2442|0x2443|0x2811|0x2812|0x2813|0x2821|0x2822|0x2823|0x2831|0x2832|0x2833|0x2841|0x2842|0x2843|0x2851|0x2852|0x2853|0x2861|0x2862|0x2863|0x2871|0x2872|0x2873|0x2881|0x2882|0x2883|0x9020)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c6c"; match "product" "0x04b2"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c7d"; match "product" "0x0005"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c88"; match "product" "0x17da"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c88"; match "product" "0x17da"; action "kldload -n ugensa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c88"; match "product" "0x180a"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0c8e"; match "product" "0x6000"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cad"; match "product" "0x9001"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x0008"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x0011"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x0012"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x0015"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x001a"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x0022"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x0023"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cde"; match "product" "0x0025"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; 
match "vendor" "0x0cde"; match "product" "0x0026"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cf3"; match "product" "(0x0001|0x0003|0x0005)"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cf3"; match "product" "(0x1001|0x1002|0x1010|0x1011)"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cf3"; match "product" "(0x3002|0x3004|0x311d)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cf3"; match "product" "0x9170"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0cf3"; match "product" "(0xe004|0xe019)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0d3a"; match "product" "0x0300"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0d46"; match "product" "(0x2020|0x2021)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0d8e"; match "product" "0x3762"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0d8e"; match "product" "(0x7801|0x7811)"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0db0"; match "product" "(0x3820|0x3821|0x3822|0x3870|0x3871)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0db0"; match "product" "(0x6861|0x6865|0x6869)"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0db0"; match "product" "(0x6874|0x6877)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0db0"; match "product" "(0x6899|0x821a|0x822a|0x870a|0x871a|0x899a)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0db0"; match "product" "(0xa861|0xa874)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0db7"; match "product" "0x0002"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0dcd"; match "product" "0x0001"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x000d"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0017"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0021"; action "kldload -n if_mos"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0028"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "(0x002b|0x002c|0x002d|0x0039|0x003b|0x003c|0x003d|0x003e|0x003f|0x0040|0x0041|0x0042)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0045"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" 
"0x0df6"; match "product" "(0x0047|0x0048|0x004a)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x004b"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x004d"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0052"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0056"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "(0x005c|0x0061)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0072"; action "kldload -n if_axge"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x0074"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "0x061c"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "(0x9071|0x9075)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df6"; match "product" "(0x90ac|0x9712)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0df7"; match "product" "0x0620"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e0b"; match "product" "(0x9031|0x9041)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e55"; match "product" "0x110b"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e66"; match "product" "(0x0001|0x0003|0x0009|0x000b)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e66"; match "product" "(0x0015|0x0016)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e66"; match "product" "(0x0019|0x0023)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e66"; match "product" "0x400c"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e67"; match "product" "0x0002"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e7e"; match "product" "0x1001"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0e8d"; match "product" "0x763f"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0ea0"; match "product" "0x6858"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0eab"; match "product" "0xc893"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0eb0"; match "product" "0x9020"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0eb0"; match "product" 
"0x9021"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0eb0"; match "product" "0x9071"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0eba"; match "product" "(0x1080|0x2080)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0eef"; match "product" "(0x0001|0x0002)"; action "kldload -n uep"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f3d"; match "product" "0x0112"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f3d"; match "product" "0x0112"; action "kldload -n ugensa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f3d"; match "product" "0x68a3"; action "kldload -n usie"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f3d"; match "product" "0x68aa"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f4e"; match "product" "0x0200"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f88"; match "product" "0x3012"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f88"; match "product" "0x3014"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f94"; match "product" "(0x0001|0x0005)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0f98"; match "product" "0x0201"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0fb8"; match "product" "(0x3001|0x3002|0x3003|0x4001)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0fcf"; match "product" "(0x1003|0x1004|0x1006)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0fd8"; match "product" "0x0001"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0fde"; match "product" "0xca05"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0fe6"; match "product" "(0x8101|0x9700)"; action "kldload -n if_udav"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1004"; match "product" "0x618f"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x100d"; match "product" "(0x9031|0x9032)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1011"; match "product" "0x3198"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x103c"; match "product" "0x1629"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x103e"; match "product" "0x03e8"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1044"; match "product" "0x8001"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1044"; match "product" "0x8002"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" 
"uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1044"; match "product" "0x8007"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1044"; match "product" "(0x8008|0x800a)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1044"; match "product" "(0x800b|0x800c|0x800d)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1066"; match "product" "(0x00ce|0x0300|0x0500|0x0600|0x0700)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x106c"; match "product" "0x3701"; action "kldload -n umodem"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10a6"; match "product" "0xaa26"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10ab"; match "product" "0x10c5"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10b5"; match "product" "0xac70"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10b5"; match "product" "0xac70"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10bd"; match "product" "0x1427"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10c4"; match "product" "(0x0f91|0x1101|0x1601|0x800a|0x803b|0x8043|0x8044)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10c4"; match "product" "0x8053"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10c4"; match "product" "(0x8066|0x806f|0x807a|0x80c4|0x80ca|0x80dd|0x80ed|0x80f6|0x8115|0x813d|0x813f|0x814a|0x814a|0x814b|0x8156|0x815e|0x815f|0x818b|0x819f|0x81a6|0x81a9|0x81ac|0x81ad|0x81c8|0x81e2|0x81e7|0x81e8|0x81f2|0x8218|0x822b|0x826b|0x8293|0x82f9|0x8341|0x8382|0x83a8|0x83d8|0x8411|0x8418|0x846e|0x8477|0x85ea|0x85eb|0x85f8|0x8664|0x8665|0x88a4|0x88a5|0xea60|0xea61|0xea70|0xea71|0xea80|0xf001|0xf002|0xf003|0xf004)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10c5"; match "product" "0xea61"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x10ce"; match "product" "0xea61"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1114"; match "product" "(0x0001|0x0004|0x0006)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x114b"; match "product" "0x0110"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x114b"; match "product" "0x0150"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1163"; match "product" "0x0100"; action "kldload -n ucycom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1182"; match "product" "0x1388"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1186"; match "product" "0x3e04"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1189"; match 
"product" "0x0893"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1199"; match "product" "(0x0017|0x0018|0x0019|0x0020|0x0021|0x0022|0x0023|0x0024|0x0025|0x0026|0x0027|0x0028|0x0029|0x0112|0x0120|0x0218)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1199"; match "product" "0x0218"; action "kldload -n umodem"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1199"; match "product" "(0x0220|0x0224|0x0fff)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1199"; match "product" "0x0fff"; action "kldload -n usie"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1199"; match "product" "(0x6802|0x6803|0x6804|0x6805|0x6808|0x6809|0x6812|0x6813|0x6815|0x6816|0x6820|0x6821|0x6822|0x6832|0x6833|0x6834|0x6835|0x6838|0x6839|0x683a|0x683b|0x683c|0x683d|0x683e|0x6850|0x6851|0x6852|0x6853|0x6855|0x6856|0x6859|0x685a|0x6880|0x6890|0x6891|0x6892|0x6893|0x68a3)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1199"; match "product" "0x68a3"; action "kldload -n usie"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1199"; - match "product" "(0x68aa|0x68c0|0x9041)"; + match "product" "(0x68aa|0x68c0|0x9041|0x9071)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x11ad"; match "product" "0x0701"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x11d9"; match "product" "(0x1002|0x1003)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x11f5"; match "product" "(0x0001|0x0003|0x0004|0x0005)"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x11f6"; match "product" "0x2001"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x11f7"; match "product" "0x02df"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1209"; match "product" "(0x1002|0x1006)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1231"; match "product" "(0xce01|0xce02)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x126f"; match "product" "0xa006"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x128d"; match "product" "0x0001"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x129b"; match "product" "0x1666"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x129b"; match "product" "0x1828"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12d1"; match "product" 
"(0x1001|0x1003|0x1004|0x1401|0x1402|0x1403|0x1404|0x1405|0x1406|0x1407|0x1408|0x1409|0x140a|0x140b|0x140c|0x140d|0x140e|0x140f|0x1410|0x1411|0x1412|0x1413|0x1414|0x1415|0x1416|0x1417|0x1418|0x1419|0x141a|0x141b|0x141c|0x141d|0x141e|0x141f|0x1420|0x1421|0x1422|0x1423|0x1424|0x1425|0x1426|0x1427|0x1428|0x1429|0x142a|0x142b|0x142c|0x142d|0x142e|0x142f|0x1430|0x1431|0x1432|0x1433|0x1434|0x1435|0x1436|0x1437|0x1438|0x1439|0x143a|0x143b|0x143c|0x143d|0x143e|0x143f|0x1446|0x1464|0x1465|0x14ac|0x14c9|0x14cf|0x14d1|0x14fe|0x1505|0x1506|0x1520|0x1521|0x1526)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12d1"; match "product" "0x155b"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12d1"; - match "product" "(0x1573|0x1803|0x1c05|0x1c0b)"; + match "product" "(0x1573|0x15c1|0x1803|0x1c05|0x1c0b)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12ef"; match "product" "0x0100"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1342"; match "product" "0x0202"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1342"; match "product" "0x0204"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1371"; match "product" "(0x9022|0x9032)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1371"; match "product" "0x9401"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1385"; match "product" "(0x4250|0x5f00|0x5f02)"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13ad"; match "product" "0x9999"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "0x000c"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "(0x000d|0x0011)"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "0x0018"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "0x001a"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "(0x0020|0x0023)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "0x0024"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "0x002f"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13b1"; match "product" "0x003f"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13d2"; match "product" "0x0400"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13d3"; match "product" "(0x3247|0x3262|0x3273|0x3284)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13d3"; match "product" "0x3304"; 
action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13d3"; match "product" "0x3305"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13d3"; match "product" "(0x3306|0x3309|0x3310|0x3311|0x3325)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13d3"; match "product" "(0x3357|0x3358|0x3359)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x13d3"; match "product" "(0x3362|0x3375|0x3393)"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1410"; match "product" "(0x1100|0x1110|0x1120|0x1130|0x1400|0x1410|0x1420|0x1430|0x1450|0x2100|0x2110|0x2120|0x2130|0x2400|0x2410|0x2420|0x4100|0x4400|0x5010|0x5020|0x5041|0x5100|0x6000|0x6002|0x7001|0x7031|0x7042)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1416"; match "product" "0x1110"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1435"; match "product" "0x0326"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1435"; match "product" "0x0427"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1435"; match "product" "0x0711"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1435"; match "product" "0x0804"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1435"; match "product" "(0x0826|0x082a)"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1453"; match "product" "0x4026"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1457"; match "product" "(0x5118|0x5118)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1472"; match "product" "0x0009"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1482"; match "product" "0x3c09"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1485"; match "product" "(0x0001|0x0002)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x148f"; match "product" "0x1706"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x148f"; match "product" "0x2070"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x148f"; match "product" "0x2570"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x148f"; match "product" "(0x2573|0x2671)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x148f"; match "product" "(0x2770|0x2870|0x2878|0x3070|0x3071|0x3072|0x3370|0x3572|0x3573|0x5370|0x5572|0x8070)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x148f"; match "product" "0x9020"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" 
"uhub[0-9]+"; match "mode" "host"; match "vendor" "0x148f"; match "product" "0x9021"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14b2"; match "product" "(0x3300|0x3301|0x3302)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14b2"; match "product" "0x3c02"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14b2"; match "product" "(0x3c06|0x3c07|0x3c08|0x3c09|0x3c11|0x3c12)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14b2"; match "product" "0x3c22"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14b2"; match "product" "(0x3c23|0x3c25|0x3c25|0x3c27|0x3c28)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14ea"; match "product" "0xab10"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14ea"; match "product" "0xab11"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x14ea"; match "product" "0xab13"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1555"; match "product" "0x0004"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1557"; match "product" "0x7720"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1557"; match "product" "0x8150"; action "kldload -n if_rue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x157e"; match "product" "0x3006"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x157e"; match "product" "(0x300a|0x300b|0x300d)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x157e"; match "product" "0x300e"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x157e"; match "product" "0x3204"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x157e"; match "product" "0x3205"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1582"; match "product" "0x6003"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x15a9"; match "product" "0x0004"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x15a9"; match "product" "(0x0006|0x0010)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x15ba"; match "product" "(0x0003|0x002b)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x15c5"; match "product" "0x0008"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x15e8"; match "product" "(0x9100|0x9110)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1614"; match "product" "(0x0800|0x0802|0x7002)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" 
"uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1631"; match "product" "0x6200"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1631"; match "product" "0xc019"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1645"; match "product" "(0x0005|0x0008|0x8005)"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x166a"; match "product" "(0x0101|0x0201|0x0301|0x0303|0x0304|0x0305|0x0401)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x167b"; match "product" "0x4001"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x168c"; match "product" "0x0001"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1690"; match "product" "0x0601"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1690"; match "product" "(0x0710|0x0712)"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1690"; match "product" "0x0722"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1690"; match "product" "(0x0740|0x0744)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16ab"; match "product" "(0x7801|0x7811)"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16d5"; match "product" "(0x6202|0x6501)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16d5"; match "product" "0x6501"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16d5"; match "product" "0x6502"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16d5"; match "product" "0x6502"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16d6"; match "product" "0x0001"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16d8"; match "product" "(0x6006|0x6280)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16d8"; match "product" "0x6280"; action "kldload -n ugensa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x16dc"; match "product" "(0x0010|0x0011|0x0012|0x0015)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1726"; match "product" "0x1000"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1726"; match "product" "0x1000"; action "kldload -n ubsa"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1737"; match "product" "0x0039"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1737"; match "product" "(0x0070|0x0071)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1737"; match "product" "0x0073"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match 
"mode" "host"; match "vendor" "0x1737"; match "product" "(0x0077|0x0078|0x0079)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1740"; match "product" "0x0100"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1740"; match "product" "(0x0605|0x0615)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1740"; match "product" "0x2000"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1740"; match "product" "(0x9603|0x9605)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1740"; match "product" "(0x9701|0x9702|0x9703|0x9705|0x9706|0x9707|0x9708|0x9709|0x9801)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1761"; match "product" "0x0b05"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x177f"; match "product" "0x0153"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x177f"; match "product" "0x0154"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x177f"; match "product" "(0x0302|0x0313)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1781"; match "product" "0x0c30"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x17a8"; match "product" "(0x0001|0x0005)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x17e9"; match "product" "(0x0059|0x0100|0x0117|0x0136|0x0138|0x0141|0x015a|0x0198|0x019b|0x01ba|0x01bb|0x01d4|0x01d7|0x01e2|0x0215|0x024c|0x02a9|0x02e9|0x0377|0x03e0|0x401a)"; action "kldload -n udl"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x17ef"; match "product" "0x304b"; action "kldload -n if_axge"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x17ef"; match "product" "0x7203"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x17f4"; match "product" "0xaaaa"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1843"; match "product" "0x0200"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x18c5"; match "product" "0x0002"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x18c5"; match "product" "(0x0008|0x0012)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x18e8"; match "product" "(0x6196|0x6229)"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x18e8"; match "product" "0x6232"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x18e8"; match "product" "0x6238"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x18e8"; match "product" "0x6259"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; 
match "vendor" "0x18ef"; match "product" "0xe00f"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x19d2"; match "product" "(0x0001|0x0002|0x0003|0x0004|0x0005|0x0006|0x0007|0x0008|0x0009|0x000a|0x000b|0x000c|0x000d|0x000e|0x000f|0x0010|0x0011|0x0012|0x0013|0x0014|0x0015|0x0016|0x0017|0x0018|0x0019|0x0020|0x0021|0x0022|0x0023|0x0024|0x0025|0x0026|0x0027|0x0028|0x0029|0x0030|0x0031|0x0032|0x0033|0x0037|0x0039|0x0042|0x0043|0x0048|0x0049|0x0051|0x0052|0x0053|0x0054|0x0055|0x0057|0x0058|0x0059|0x0060|0x0061|0x0062|0x0063|0x0064|0x0066|0x0069|0x0070|0x0073|0x0076|0x0078|0x0082|0x0086|0x0103|0x0117|0x1179|0x1181|0x1420|0x1514|0x1516|0x2000|0x2002|0x2003|0xffdd|0xffde|0xfff1|0xfff5|0xfffe)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x19f5"; match "product" "0x9909"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1a79"; match "product" "0x6001"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1a86"; match "product" "(0x5523|0x7523)"; action "kldload -n uchcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1a8d"; match "product" "(0x1002|0x1003|0x1004|0x1005|0x1006|0x1007|0x1008|0x1009|0x100a|0x100b|0x100c|0x100d|0x100e|0x100f|0x1010|0x1011|0x1012)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1adb"; match "product" "0x0001"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1b3d"; match "product" "(0x0100|0x0101|0x0102|0x0103|0x0104|0x0105|0x0106|0x0107|0x0108|0x0109|0x010a|0x010b|0x010c|0x010d|0x010e|0x010f|0x0110|0x0111|0x0112|0x0113|0x0114|0x0115|0x0116|0x0117|0x0118|0x0119|0x011a|0x011b|0x011c|0x011d|0x011e|0x011f|0x0120|0x0121|0x0122|0x0123|0x0124|0x0125|0x0126|0x0128|0x0129|0x012a|0x012b|0x012d|0x012e|0x012f|0x0130|0x0131|0x0132|0x0133|0x0134|0x0135|0x0136|0x0137|0x0138|0x0139|0x013a|0x013b|0x013c|0x013d|0x013e|0x013f|0x0140|0x0141|0x0142|0x0143|0x0144|0x0145|0x0146|0x0147|0x0148|0x0149|0x014a|0x014b|0x014c|0x014d|0x014e|0x014f|0x0150|0x0151|0x0152|0x0153|0x0159|0x015a|0x015b|0x015c|0x015d|0x015e|0x015f|0x0160|0x0161|0x0162|0x0163|0x0164|0x0165|0x0166|0x0167|0x0168|0x0169|0x016a|0x016b|0x016c|0x016d|0x016e|0x016f|0x0170|0x0171|0x0172|0x0173|0x0174|0x0175|0x0176|0x0177|0x0178|0x0179|0x017a|0x017b|0x017c|0x017d|0x017e|0x017f|0x0180|0x0181|0x0182|0x0183|0x0184|0x0185|0x0186|0x0187|0x0188|0x0189|0x018a|0x018b|0x018c|0x018d|0x018e|0x018f|0x0190|0x0191|0x0192|0x0193|0x0194|0x0195|0x0196|0x0197|0x0198|0x0199|0x019a|0x019b|0x019c|0x019d|0x019e|0x019f|0x01a0|0x01a1|0x01a2|0x01a3|0x01a4|0x01a5|0x01a6|0x01a7|0x01a8|0x01a9|0x01aa|0x01ab|0x01ac|0x01ad|0x01ae|0x01af|0x01b0|0x01b1|0x01b2|0x01b3|0x01b4|0x01b5|0x01b6|0x01b7|0x01b8|0x01b9|0x01ba|0x01bb|0x01bc|0x01bd|0x01be|0x01bf|0x01c0|0x01c1|0x01c2|0x01c3|0x01c4|0x01c5|0x01c6|0x01c7|0x01c8|0x01c9|0x01ca|0x01cb|0x01cc|0x01cd|0x01ce|0x01cf|0x01d0|0x01d1|0x01d2|0x01d3|0x01d4|0x01d5|0x01d6|0x01d7|0x01d8|0x01d9|0x01da|0x01db|0x01dc|0x01dd|0x01de|0x01df|0x01e0|0x01e1|0x01e2|0x01e3|0x01e4|0x01e5|0x01e6|0x01e7|0x01e8|0x01e9|0x01ea|0x01eb|0x01ec|0x01ed|0x01ee|0x01ef|0x01f0|0x01f1|0x01f2|0x01f3|0x01f4|0x01f5|0x01f6|0x01f7|0x01f8|0x01f9|0x01fa|0x01fb|0x01fc|0x01fd|0x01fe|0x01ff)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match 
"vendor" "0x1b75"; match "product" "0x3072"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1b75"; match "product" "0x8187"; action "kldload -n if_urtw"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1b91"; match "product" "0x0064"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1bbb"; match "product" "(0x0000|0xf000)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1bc7"; match "product" "(0x1003|0x1004)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1bc9"; match "product" "0x6001"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1be3"; match "product" "0x07a6"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1c0c"; match "product" "0x0102"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1c9e"; match "product" "(0x6061|0x9603|0x9605|0xf000)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1cf1"; match "product" "(0x0001|0x0004|0x001c|0x0022)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1d09"; match "product" "0x4000"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1d34"; match "product" "0x0004"; action "kldload -n uled"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1d4d"; match "product" "(0x0002|0x000c|0x000e|0x0010)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1da5"; match "product" "(0x4512|0x4515|0x4519|0x4523)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1e0e"; match "product" "(0x9000|0x9200|0xce16)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1e29"; match "product" "(0x0102|0x0501)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1eda"; match "product" "0x2310"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x1fb9"; match "product" "(0x0100|0x0200|0x0201|0x0202|0x0203|0x0300|0x0301|0x0302|0x0303|0x0400|0x0401|0x0402|0x0403|0x0404|0x0600|0x0601|0x0602|0x0700|0x0701)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "(0x1a00|0x1a02)"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "0x200c"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; - match "product" "(0x3307|0x3308|0x3309|0x330a|0x330d|0x330f|0x3310|0x3314|0x3315|0x3316|0x3318)"; + match "product" "(0x3307|0x3308|0x3309|0x330a|0x330d|0x330f|0x3310|0x3314|0x3315|0x3316|0x3318|0x3319)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "(0x3a00|0x3a02|0x3a04)"; action "kldload -n if_uath"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match 
"mode" "host"; match "vendor" "0x2001"; match "product" "0x3c00"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "0x3c05"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "(0x3c09|0x3c0a|0x3c15|0x3c1a|0x3c1b|0x3c1f|0x3c20)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "0x4000"; action "kldload -n if_kue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "(0x4001|0x4002|0x4003|0x400b|0x4102)"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "0x4a00"; action "kldload -n if_axge"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "(0x7d02|0x7e12|0xa707|0xa805)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2001"; match "product" "0xabc1"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "(0x1201|0x4902)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "0x5303"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "0x5304"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "0xab01"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "(0xab24|0xab25)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "0xab28"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "(0xab2a|0xab2b|0xab2e|0xab30)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "0xab50"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "(0xc007|0xed01)"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "0xed02"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "(0xed06|0xed14)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2019"; match "product" "0xed17"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x203d"; match "product" "(0x1480|0x14a1|0x14a9)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x20b7"; match "product" "0x0713"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x20b8"; match "product" "0x8888"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x20b9"; match "product" "0x1682"; action "kldload -n u3g"; }; nomatch 
32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x20f4"; match "product" "0x624d"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x20f4"; match "product" "0x646b"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x20f4"; match "product" "(0x648b|0x805b)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2100"; match "product" "(0x9e52|0x9e54)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x22b8"; match "product" "(0x4204|0x4214|0x4224|0x4234|0x4244)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x22b8"; match "product" "(0x600c|0x6027)"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x22b8"; match "product" "0x710f"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x22de"; match "product" "0x6801"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2357"; - match "product" "0x0101"; + match "product" "(0x0101|0x0108|0x0109)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2405"; match "product" "0x0003"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x2478"; match "product" "0x2008"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x3195"; match "product" "(0xf190|0xf280|0xf281)"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x3334"; match "product" "0x1701"; action "kldload -n if_aue"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x3340"; match "product" "(0x011c|0x0326|0x0426|0x043a|0x051c|0x053a|0x071c|0x0b1c|0x0e3a|0x0f1c|0x0f3a|0x1326|0x191c|0x2326|0x3326)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x3708"; match "product" "(0x20ce|0x21ce)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x4113"; match "product" "(0x0210|0x0211|0x0400|0x0410)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x413c"; match "product" "(0x4001|0x4002|0x4003|0x4004|0x4005|0x4006|0x4007|0x4008|0x4009)"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x413c"; match "product" "(0x8102|0x8104)"; action "kldload -n if_upgt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x413c"; match "product" "(0x8114|0x8115|0x8116|0x8117|0x8118|0x8128|0x8129|0x8133|0x8134|0x8135|0x8136|0x8137|0x8138|0x8180|0x8181|0x8182)"; action "kldload -n u3g"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x413c"; match "product" "0x8197"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x413c"; match "product" "0x9500"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x4348"; match "product" "0x5523"; action "kldload -n uchcom"; }; nomatch 32 
{ match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x4505"; match "product" "0x0010"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x4766"; match "product" "0x0001"; action "kldload -n uvisor"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x4855"; match "product" "(0x0090|0x0091)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x5050"; match "product" "(0x0100|0x0101|0x0102|0x0103|0x0104|0x0105|0x0106|0x0107|0x0300|0x0301|0x0400|0x0500|0x0700|0x0800|0x0900|0x0a00|0x0b00|0x0c00|0x0d00|0x0e00|0x0f00|0x1000|0x8000|0x8001|0x8002|0x8003|0x8004|0x8005)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x5173"; match "product" "0x1809"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x5372"; match "product" "0x2303"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x5a57"; match "product" "0x0260"; action "kldload -n if_ural"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x5a57"; match "product" "(0x0280|0x0282|0x0283|0x0284|0x5257)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x5e04"; match "product" "0xce00"; action "kldload -n uipaq"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x6189"; match "product" "0x182d"; action "kldload -n if_axe"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x6189"; match "product" "0x2068"; action "kldload -n uplcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x6547"; match "product" "0x0232"; action "kldload -n uark"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x6891"; match "product" "0xa727"; action "kldload -n if_zyd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x7392"; match "product" "0x7318"; action "kldload -n if_rum"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x7392"; match "product" "(0x7611|0x7612|0x7622)"; action "kldload -n if_rsu"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x7392"; match "product" "(0x7711|0x7717|0x7718|0x7733)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x7392"; match "product" "(0x7811|0x7822|0xa811|0xa812|0xa822)"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x8516"; match "product" "(0x2070|0x2770|0x2870|0x3070|0x3071|0x3072|0x3572)"; action "kldload -n if_run"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x9710"; match "product" "0x7703"; action "kldload -n umoscom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x9710"; match "product" "0x7730"; action "kldload -n if_mos"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x9710"; match "product" "0x7820"; action "kldload -n umcs"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x9710"; match "product" "(0x7830|0x7832)"; action "kldload -n if_mos"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x9710"; 
match "product" "0x7840"; action "kldload -n umcs"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x9846"; match "product" "0x9041"; action "kldload -n if_rtwn_usb"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x9e88"; match "product" "0x9e8f"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0xcace"; match "product" "0x0300"; action "kldload -n if_otus"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0xdaae"; match "product" "0xead6"; action "kldload -n uslcom"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0xdeee"; match "product" "(0x0300|0x0302|0x0303)"; action "kldload -n uftdi"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0489"; match "intclass" "0xff"; match "intsubclass" "0x01"; match "intprotocol" "0x01"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "intclass" "0xff"; match "intsubclass" "0x01"; match "intprotocol" "0x01"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x05ac"; match "intclass" "0xff"; match "intsubclass" "0xfd"; match "intprotocol" "0x01"; action "kldload -n if_ipheth"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0830"; match "intclass" "0x02"; match "intsubclass" "0x02"; match "intprotocol" "0xff"; action "kldload -n if_urndis"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x0a5c"; match "intclass" "0xff"; match "intsubclass" "0x01"; match "intprotocol" "0x01"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12d1"; match "intclass" "0x02"; match "intsubclass" "0x02"; match "intprotocol" "0xff"; action "kldload -n umodem"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12d1"; match "intclass" "0xff"; match "intsubclass" "0x02"; match "intprotocol" "0x16"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12d1"; match "intclass" "0xff"; match "intsubclass" "0x02"; match "intprotocol" "0x46"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "vendor" "0x12d1"; match "intclass" "0xff"; match "intsubclass" "0x02"; match "intprotocol" "0x76"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "(host|device)"; match "intclass" "0x02"; match "intsubclass" "0x02"; match "intprotocol" "0x00"; action "kldload -n umodem"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "(host|device)"; match "intclass" "0x02"; match "intsubclass" "0x02"; match "intprotocol" "0x01"; action "kldload -n umodem"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x02"; match "intsubclass" "0x02"; match "intprotocol" "0xff"; action "kldload -n if_urndis"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x03"; match "intsubclass" "0x01"; match "intprotocol" "0x01"; action "kldload -n ukbd"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x03"; match "intsubclass" "0x01"; match "intprotocol" "0x02"; action "kldload -n ums"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x07"; match "intsubclass" 
"0x01"; match "intprotocol" "0x01"; action "kldload -n ulpt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x07"; match "intsubclass" "0x01"; match "intprotocol" "0x02"; action "kldload -n ulpt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x07"; match "intsubclass" "0x01"; match "intprotocol" "0x03"; action "kldload -n ulpt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0xe0"; match "intsubclass" "0x01"; match "intprotocol" "0x01"; action "kldload -n ng_ubt"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0xe0"; match "intsubclass" "0x01"; match "intprotocol" "0x03"; action "kldload -n if_urndis"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0xef"; match "intsubclass" "0x01"; match "intprotocol" "0x01"; action "kldload -n if_urndis"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0xff"; match "intsubclass" "0x5d"; match "intprotocol" "0x01"; action "kldload -n uhid"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x01"; match "intsubclass" "0x01"; action "kldload -n snd_uaudio"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x01"; match "intsubclass" "0x03"; action "kldload -n snd_uaudio"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "(host|device)"; match "intclass" "0x02"; match "intsubclass" "0x06"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "(host|device)"; match "intclass" "0x02"; match "intsubclass" "0x0a"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "(host|device)"; match "intclass" "0x02"; match "intsubclass" "0x0d"; action "kldload -n if_cdce"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x02"; match "intsubclass" "0x88"; action "kldload -n ufoma"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x03"; action "kldload -n uhid"; }; nomatch 32 { match "bus" "uhub[0-9]+"; match "mode" "host"; match "intclass" "0x08"; action "kldload -n umass"; }; -# 2743 USB entries processed +# 2751 USB entries processed Index: projects/clang400-import/lib/libc/aarch64/sys/cerror.S =================================================================== --- projects/clang400-import/lib/libc/aarch64/sys/cerror.S (revision 312719) +++ projects/clang400-import/lib/libc/aarch64/sys/cerror.S (revision 312720) @@ -1,42 +1,42 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include <machine/asm.h> __FBSDID("$FreeBSD$"); ENTRY(cerror) .hidden cerror sub sp, sp, #16 stp x0, lr, [sp] bl _C_LABEL(__error) ldp x1, lr, [sp] - str x1, [x0] + str w1, [x0] movn x0, #0 movn x1, #0 add sp, sp, #16 ret END(cerror) Index: projects/clang400-import/sbin/restore/restore.8 =================================================================== --- projects/clang400-import/sbin/restore/restore.8 (revision 312719) +++ projects/clang400-import/sbin/restore/restore.8 (revision 312720) @@ -1,503 +1,505 @@ .\" Copyright (c) 1985, 1991, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)restore.8 8.4 (Berkeley) 5/1/95 .\" $FreeBSD$ .\" .Dd October 12, 2006 .Dt RESTORE 8 .Os .Sh NAME .Nm restore , .Nm rrestore .Nd "restore files or file systems from backups made with dump" .Sh SYNOPSIS .Nm .Fl i .Op Fl dDhmNuvy .Op Fl b Ar blocksize .Op Fl f Ar file | Fl P Ar pipecommand .Op Fl s Ar fileno .Nm .Fl R .Op Fl dDNuvy .Op Fl b Ar blocksize .Op Fl f Ar file | Fl P Ar pipecommand .Op Fl s Ar fileno .Nm .Fl r .Op Fl dDNuvy .Op Fl b Ar blocksize .Op Fl f Ar file | Fl P Ar pipecommand .Op Fl s Ar fileno .Nm .Fl t .Op Fl dDhNuvy .Op Fl b Ar blocksize .Op Fl f Ar file | Fl P Ar pipecommand .Op Fl s Ar fileno .Op Ar .Nm .Fl x .Op Fl dDhmNuvy .Op Fl b Ar blocksize .Op Fl f Ar file | Fl P Ar pipecommand .Op Fl s Ar fileno .Op Ar .Sh DESCRIPTION The .Nm utility performs the inverse function of .Xr dump 8 .
A full backup of a file system may be restored and subsequent incremental backups layered on top of it. Single files and directory subtrees may be restored from full or partial backups. The .Nm utility works across a network; to do this see the .Fl f and .Fl P flags described below. Other arguments to the command are file or directory names specifying the files that are to be restored. Unless the .Fl h flag is specified (see below), the appearance of a directory name refers to the files and (recursively) subdirectories of that directory. .Pp .Nm may also be invoked as .Nm rrestore . The .Bx 4.3 option syntax is implemented for backward compatibility, but is not documented here. .Pp Exactly one of the following flags is required: .Bl -tag -width Ds .It Fl i This mode allows interactive restoration of files from a dump. After reading in the directory information from the dump, .Nm provides a shell-like interface that allows the user to move around the directory tree selecting files to be extracted. The available commands are given below; for those commands that require an argument, the default is the current directory. .Bl -tag -width Fl .It Ic add Op Ar arg The current directory or specified argument is added to the list of files to be extracted. If a directory is specified, then it and all its descendants are added to the extraction list (unless the .Fl h flag is specified on the command line). Files that are on the extraction list are prepended with a ``*'' when they are listed by .Ic ls . .It Ic \&cd Ar arg Change the current working directory to the specified argument. .It Ic delete Op Ar arg The current directory or specified argument is deleted from the list of files to be extracted. If a directory is specified, then it and all its descendants are deleted from the extraction list (unless the .Fl h flag is specified on the command line). The most expedient way to extract most of the files from a directory is to add the directory to the extraction list and then delete those files that are not needed. .It Ic extract All the files that are on the extraction list are extracted from the dump. The .Nm utility will ask which volume the user wishes to mount. The fastest way to extract a few files is to start with the last volume, and work towards the first volume. .It Ic help List a summary of the available commands. .It Ic \&ls Op Ar arg List the current or specified directory. Entries that are directories are appended with a ``/''. Entries that have been marked for extraction are prepended with a ``*''. If the verbose flag is set the inode number of each entry is also listed. .It Ic pwd Print the full pathname of the current working directory. .It Ic quit Exit immediately, even if the extraction list is not empty. .It Ic setmodes All the directories that have been added to the extraction list have their owner, modes, and times set; nothing is extracted from the dump. This is useful for cleaning up after a restore has been prematurely aborted. .It Ic verbose The sense of the .Fl v flag is toggled. When set, the verbose flag causes the .Ic ls command to list the inode numbers of all entries. It also causes .Nm to print out information about each file as it is extracted. .It Ic what Display dump header information, which includes: date, level, label, and the file system and host the dump was made from. .El .It Fl R Request a particular tape of a multi-volume set on which to restart a full restore (see the .Fl r flag below). This is useful if the restore has been interrupted.
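.Pp
For example, an interrupted full restore can be resumed from the
pristine file system it was started in (the mount point and tape
device below are illustrative):
.Bd -literal -offset indent
cd /mnt
restore -R -f /dev/sa0
.Ed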
.It Fl r Restore (rebuild a file system). The target file system should be made pristine with .Xr newfs 8 , mounted and the user .Xr cd 1 Ns 'd into the pristine file system before starting the restoration of the initial level 0 backup. If the level 0 restores successfully, the .Fl r flag may be used to restore any necessary incremental backups on top of the level 0. The .Fl r flag precludes an interactive file extraction and can be detrimental to one's health if not used carefully (not to mention the disk). An example: .Bd -literal -offset indent newfs /dev/da0s1a mount /dev/da0s1a /mnt cd /mnt restore rf /dev/sa0 .Ed .Pp Note that .Nm leaves a file .Pa restoresymtable in the root directory to pass information between incremental restore passes. This file should be removed when the last incremental has been restored. .Pp The .Nm utility, in conjunction with .Xr newfs 8 and .Xr dump 8 , may be used to modify file system parameters such as size or block size. .It Fl t The names of the specified files are listed if they occur on the backup. If no file argument is given, then the root directory is listed, which results in the entire content of the backup being listed, unless the .Fl h flag has been specified. Note that the .Fl t flag replaces the function of the old .Xr dumpdir 8 program. .It Fl x The named files are read from the given media. If a named file matches a directory whose contents are on the backup and the .Fl h flag is not specified, the directory is recursively extracted. The owner, modification time, and mode are restored (if possible). If no file argument is given, then the root directory is extracted, which results in the entire content of the backup being extracted, unless the .Fl h flag has been specified. .El .Pp The following additional options may be specified: .Bl -tag -width Ds .It Fl b Ar blocksize The number of kilobytes per dump record. If the .Fl b option is not specified, .Nm tries to determine the media block size dynamically. .It Fl d Sends verbose debugging output to the standard error. .It Fl D This puts .Nm into degraded mode, causing restore to operate less efficiently but to try harder to read corrupted backups. .It Fl f Ar file Read the backup from .Ar file ; .Ar file may be a special device file like .Pa /dev/sa0 (a tape drive), .Pa /dev/da1c (a disk drive), an ordinary file, or .Sq Fl (the standard input). If the name of the file is of the form .Dq host:file , or .Dq user@host:file , .Nm reads from the named file on the remote host using .Xr rmt 8 . .It Fl P Ar pipecommand Use .Xr popen 3 to execute the .Xr sh 1 script string defined by .Ar pipecommand as the input for every volume in the backup. This child pipeline's .Dv stdout .Pq Pa /dev/fd/1 is redirected to the .Nm input stream, and the environment variable .Ev RESTORE_VOLUME is set to the current volume number being read. The .Ar pipecommand script is started each time a volume is loaded, as if it were a tape drive. .It Fl h Extract the actual directory, rather than the files that it references. This prevents hierarchical restoration of complete subtrees from the dump. .It Fl m Extract by inode numbers rather than by file name. This is useful if only a few files are being extracted, and one wants to avoid regenerating the complete pathname to the file. .It Fl N Do the extraction normally, but do not actually write any changes to disk. This can be used to check the integrity of dump media or for other test purposes. .It Fl s Ar fileno Read from the specified .Ar fileno on a multi-file tape.
File numbering starts at 1. .It Fl u When creating certain types of files, restore may generate a warning diagnostic if they already exist in the target directory. To prevent this, the .Fl u (unlink) flag causes restore to remove old entries before attempting to create new ones. +This flag is recommended when using extended attributes +to avoid improperly accumulating attributes on pre-existing files. .It Fl v Normally .Nm does its work silently. The .Fl v (verbose) flag causes it to type the name of each file it treats preceded by its file type. .It Fl y Do not ask the user whether to abort the restore in the event of an error. Always try to skip over the bad block(s) and continue. .El .Sh ENVIRONMENT .Bl -tag -width ".Ev TMPDIR" .It Ev TAPE Device from which to read backup. .It Ev TMPDIR Name of directory where temporary files are to be created. .El .Sh FILES .Bl -tag -width "./restoresymtable" -compact .It Pa /dev/sa0 the default tape drive .It Pa /tmp/rstdir* file containing directories on the tape. .It Pa /tmp/rstmode* owner, mode, and time stamps for directories. .It Pa \&./restoresymtable information passed between incremental restores. .El .Sh DIAGNOSTICS The .Nm utility complains if it gets a read error. If .Fl y has been specified, or the user responds .Ql y , .Nm will attempt to continue the restore. .Pp If a backup was made using more than one tape volume, .Nm will notify the user when it is time to mount the next volume. If the .Fl x or .Fl i flag has been specified, .Nm will also ask which volume the user wishes to mount. The fastest way to extract a few files is to start with the last volume, and work towards the first volume. .Pp There are numerous consistency checks that can be listed by .Nm . Most checks are self-explanatory or can ``never happen''. Common errors are given below. .Pp .Bl -tag -width Ds -compact .It <filename>: not found on tape The specified file name was listed in the tape directory, but was not found on the tape. This is caused by tape read errors while looking for the file, and from using a dump tape created on an active file system. .Pp .It expected next file <inumber>, got <inumber> A file that was not listed in the directory showed up. This can occur when using a dump created on an active file system. .Pp .It Incremental dump too low When doing incremental restore, a dump that was written before the previous incremental dump, or that has too low an incremental level has been loaded. .Pp .It Incremental dump too high When doing incremental restore, a dump that does not begin its coverage where the previous incremental dump left off, or that has too high an incremental level has been loaded. .Pp .It Tape read error while restoring <filename> .It Tape read error while skipping over inode <inumber> .It Tape read error while trying to resynchronize A tape (or other media) read error has occurred. If a file name is specified, then its contents are probably partially wrong. If an inode is being skipped or the tape is trying to resynchronize, then no extracted files have been corrupted, though files may not be found on the tape. .Pp .It resync restore, skipped <num> blocks After a dump read error, .Nm may have to resynchronize itself. This message lists the number of blocks that were skipped over. .El .Sh SEE ALSO .Xr dump 8 , .Xr mount 8 , .Xr newfs 8 , .Xr rmt 8 .Sh HISTORY The .Nm utility appeared in .Bx 4.2 . .Sh BUGS The .Nm utility can get confused when doing incremental restores from dumps that were made on active file systems without the .Fl L option (see .Xr dump 8 ) .
.Pp A level zero dump must be done after a full restore. Because restore runs in user code, it has no control over inode allocation; thus a full dump must be done to get a new set of directories reflecting the new inode numbering, even though the contents of the files are unchanged. .Pp To do a network restore, you have to run restore as root. This is due to the previous security history of dump and restore. (restore is written to be setuid root, but we are not certain all bugs are gone from the restore code - run setuid at your own risk.) .Pp The temporary files .Pa /tmp/rstdir* and .Pa /tmp/rstmode* are generated with a unique name based on the date of the dump and the process ID (see .Xr mktemp 3 ) , except for when .Fl r or .Fl R is used. Because .Fl R allows you to restart a .Fl r operation that may have been interrupted, the temporary files should be the same across different processes. In all other cases, the files are unique because it is possible to have two different dumps started at the same time, and separate operations should not conflict with each other. Index: projects/clang400-import/share/man/man4/rtwn.4 =================================================================== --- projects/clang400-import/share/man/man4/rtwn.4 (revision 312719) +++ projects/clang400-import/share/man/man4/rtwn.4 (revision 312720) @@ -1,241 +1,242 @@ .\" $OpenBSD: rtwn.4,v 1.2 2015/07/09 11:28:53 stsp Exp $ .\" .\" Copyright (c) 2010 Damien Bergamini .\" Copyright (c) 2015 Stefan Sperling .\" Copyright (c) 2016 Andriy Voskoboinyk .\" .\" Permission to use, copy, modify, and distribute this software for any .\" purpose with or without fee is hereby granted, provided that the above .\" copyright notice and this permission notice appear in all copies. .\" .\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES .\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF .\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR .\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES .\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN .\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF .\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. .\" .\" $FreeBSD$ .\" -.Dd October 17, 2016 +.Dd January 24, 2017 .Dt RTWN 4 .Os .Sh NAME .Nm rtwn .Nd Realtek IEEE 802.11 wireless network driver .Sh SYNOPSIS .Cd "options RTWN_DEBUG" .Cd "options RTWN_WITHOUT_UCODE" .Pp To compile this driver into the kernel, place the following lines in your kernel configuration file: .Bd -ragged -offset indent .Cd "device rtwn" .Cd "device rtwnfw" .Cd "device rtwn_usb" .Cd "device rtwn_pci" .Cd "device wlan" .Cd "device firmware" .Ed .Pp Alternatively, to load the driver as a module at boot time, place the following lines in .Xr loader.conf 5 : .Bd -literal -offset indent if_rtwn_load="YES" if_rtwn_pci_load="YES" if_rtwn_usb_load="YES" .Ed .Sh DESCRIPTION The .Nm driver provides support for wireless network devices based on -the Realtek RTL8192C, RTL8188E, RTL8812A and RTL8821A programming APIs. -These APIs are used by a wide variety of chips; most chips with USB -and some with PCI interface are supported. +the Realtek RTL8192C, RTL8188E, RTL8192E, RTL8812A and RTL8821A +programming APIs. These APIs are used by a wide variety of chips; +most chips with USB and some with PCI interface are supported. .Pp To enable use for PCI/PCIe systems, see the rtwn_pci(4) driver; for USB devices, use the rtwn_usb(4) driver.
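.Pp
For example, on a running system the USB glue (and, through module
dependencies, the core driver) can be loaded manually instead of from
.Xr loader.conf 5 ;
the module name matches the SYNOPSIS above:
.Bd -literal -offset indent
kldload if_rtwn_usb
.Ed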
.Pp The driver supports .Cm station , .Cm adhoc , .Cm hostap and .Cm monitor mode operation. There is no limit on the number of .Cm monitor mode virtual interfaces; in addition to any other virtual interfaces, one .Cm station interface can be added (note: the RTL8821AU supports two non-monitor mode interfaces at the same time). .Pp All chips have hardware support for WEP, AES-CCM and TKIP encryption. .Pp The .Nm driver can be configured at runtime with .Xr ifconfig 8 . .Sh FILES .Bl -tag -width ".Pa /usr/share/doc/legal/realtek.LICENSE" -compact .It Pa /usr/share/doc/legal/realtek.LICENSE .Nm firmware license .El .Pp The driver (if not compiled with .Cd options RTWN_WITHOUT_UCODE ) may use the following firmware files, which are loaded when an interface is brought up: .Bl -tag -width Ds -offset indent -compact .It Pa /boot/kernel/rtwn-rtl8188eufw.ko .It Pa /boot/kernel/rtwn-rtl8192cfwE_B.ko .It Pa /boot/kernel/rtwn-rtl8192cfwE.ko .It Pa /boot/kernel/rtwn-rtl8192cfwT.ko .It Pa /boot/kernel/rtwn-rtl8192cfwU.ko +.It Pa /boot/kernel/rtwn-rtl8192eufw.ko .It Pa /boot/kernel/rtwn-rtl8812aufw.ko .It Pa /boot/kernel/rtwn-rtl8821aufw.ko .El .Sh EXAMPLES Join an existing BSS network (i.e., connect to an access point): .Bd -literal -offset indent ifconfig wlan create wlandev rtwn0 inet 192.168.0.20 \e netmask 0xffffff00 .Ed .Pp Join a specific BSS network with network name .Dq Li my_net : .Pp .Dl "ifconfig wlan create wlandev rtwn0 ssid my_net up" .Pp Join a specific BSS network with 64-bit WEP encryption: .Bd -literal -offset indent ifconfig wlan create wlandev rtwn0 ssid my_net \e wepmode on wepkey 0x1234567890 weptxkey 1 up .Ed .Pp Create an IBSS network with 128-bit WEP encryption on channel 4: .Bd -literal -offset indent ifconfig wlan create wlandev rtwn0 wlanmode adhoc ssid my_net \e wepmode on wepkey 0x01020304050607080910111213 weptxkey 1 \e channel 4 .Ed .Pp Join/create an 802.11b IBSS network with network name .Dq Li my_net : .Bd -literal -offset indent ifconfig wlan0 create wlandev rtwn0 wlanmode adhoc ifconfig wlan0 inet 192.168.0.22 netmask 0xffffff00 ssid my_net \e mode 11b .Ed .Pp Create a host-based access point: .Bd -literal -offset indent ifconfig wlan0 create wlandev rtwn0 wlanmode hostap ifconfig wlan0 inet 192.168.0.10 netmask 0xffffff00 ssid my_ap .Ed .Sh LOADER TUNABLES Tunables can be set at the .Xr loader 8 prompt before booting the kernel or stored in .Xr loader.conf 5 . .Bl -tag -width indent .It Va dev.rtwn.%d.hwcrypto This tunable controls how key slots are assigned: .br 0 - disable h/w crypto support. Features that require access to frame contents (e.g., TCP/UDP/IP Rx checksum validation) will not work; .br 1 - use h/w crypto support for pairwise keys only; .br 2 - use h/w crypto support for all keys; may not work for multi-vap configurations. .br By default it is set to 1. .It Va dev.rtwn.%d.ratectl This tunable switches between rate control implementations: .br 0 - no rate control; .br 1 - driver sends 'tx complete' reports to net80211; algorithm is controlled via net80211; .br 2 - firmware-based rate control. .br By default it is set to 1; however, the driver may choose another algorithm if the selected one is not implemented. .br The currently selected algorithm is reported via the .Va dev.rtwn.%d.ratectl_selected read-only OID. .El .Sh DIAGNOSTICS .Bl -diag .It "rtwn%d: could not read efuse byte at address 0x%x" .It "rtwn%d: %s: cannot read rom, error %d" There was an error while reading ROM; device attach will be aborted. This should not happen.
.It "rtwn%d: failed loadfirmware of file %s" For some reason, the driver was unable to read the microcode file from the filesystem. The file might be missing or corrupted. The driver will disable firmware-dependent features. .It "rtwn%d: wrong firmware size (%zu)" .It "rtwn%d: %s: failed to upload firmware %s (error %d)" .It "rtwn%d: timeout waiting for firmware readiness" Firmware upload failed; the file might be corrupted. The driver will disable firmware-dependent features. This should not happen. .It "rtwn%d: device timeout" A frame dispatched to the hardware for transmission did not complete in time. The driver will reset the hardware. This should not happen. .El .Sh SEE ALSO .Xr intro 4 , .Xr netintro 4 , .Xr rtwn_pci 4 , .Xr rtwn_usb 4 , .Xr rtwnfw 4 , .Xr wlan 4 , .Xr wlan_amrr 4 , .Xr wlan_ccmp 4 , .Xr wlan_tkip 4 , .Xr wlan_wep 4 , .Xr wlan_xauth 4 , .Xr hostapd 8 , .Xr ifconfig 8 , .Xr wpa_supplicant 8 .Sh HISTORY The .Cm urtwn driver first appeared in .Ox 4.9 and .Fx 10.0 ; the .Nm driver first appeared in .Ox 5.8 . .Sh AUTHORS The .Nm driver was initially written by .An -nosplit .An Stefan Sperling Aq Mt stsp@openbsd.org and ported by .An Kevin Lo Aq Mt kevlo@freebsd.org . It was based on the .Cm urtwn driver written by .An Damien Bergamini Aq Mt damien.bergamini@free.fr . .Sh BUGS The .Nm driver currently does not implement firmware-based rate control. Index: projects/clang400-import/share/man/man4/rtwn_usb.4 =================================================================== --- projects/clang400-import/share/man/man4/rtwn_usb.4 (revision 312719) +++ projects/clang400-import/share/man/man4/rtwn_usb.4 (revision 312720) @@ -1,113 +1,117 @@ .\"- .\" Copyright (c) 2011 Adrian Chadd, Xenion Pty Ltd .\" Copyright (c) 2016 Andriy Voskoboinyk .\" All rights reserved. .\"" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer, .\" without modification. .\" 2. Redistributions in binary form must reproduce at minimum a disclaimer .\" similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any .\" redistribution must be conditioned upon including a substantially .\" similar Disclaimer requirement for further binary redistribution. .\" .\" NO WARRANTY .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT .\" LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY .\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL .\" THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, .\" OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER .\" IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF .\" THE POSSIBILITY OF SUCH DAMAGES. 
.\" .\" $FreeBSD$ .\"/ -.Dd January 6, 2017 +.Dd January 24, 2017 .Dt RTWN_USB 4 .Os .Sh NAME .Nm rtwn_usb .Nd "Realtek USB device glue" .Sh SYNOPSIS To compile this driver into the kernel, place the following lines in your kernel configuration file: .Bd -ragged -offset indent .Cd "device xhci" .Cd "device ehci" .Cd "device uhci" .Cd "device ohci" .Cd "device usb" .Cd "device rtwn_usb" .Cd "device wlan" .Ed .Sh DESCRIPTION This module provides the USB bus glue needed for the devices supported by the .Xr rtwn 4 driver. .Sh HARDWARE The .Nm -driver supports Realtek RTL8188CU/RTL8188RU/RTL8188EU/RTL8192CU/RTL8812AU/RTL8821AU +driver supports Realtek RTL8188CU/RTL8188RU/RTL8188EU/RTL8192CU/RTL8192EU/RTL8812AU/RTL8821AU based USB wireless network adapters, including: .Pp .Bl -column -compact "Belkin F7D1102 Surf Wireless Micro" "Bus" .It Em Card Ta Em Bus .It "Alfa AWUS036NHR v2" Ta USB 2.0 .It "ASUS USB-AC56" Ta USB 3.0 .It "ASUS USB-N10 NANO" Ta USB 2.0 .It "Belkin F7D1102 Surf Wireless Micro" Ta USB 2.0 .It "Buffalo WI-U2-433DM" Ta USB 2.0 .It "Buffalo WI-U3-866D" Ta USB 3.0 .It "D-Link DWA-123 rev D1" Ta USB 2.0 .It "D-Link DWA-125 rev D1" Ta USB 2.0 .It "D-Link DWA-131" Ta USB 2.0 +.It "D-Link DWA-131 rev E1" Ta USB 2.0 .It "D-Link DWA-171 rev A1" Ta USB 2.0 .It "D-Link DWA-172 rev A1" Ta USB 2.0 .It "D-Link DWA-180 rev A1" Ta USB 2.0 .It "D-Link DWA-182 rev C1" Ta USB 3.0 .It "Edimax EW-7811Un" Ta USB 2.0 .It "Edimax EW-7811UTC" Ta USB 2.0 .It "Edimax EW-7822UAC" Ta USB 3.0 .It "Elecom WDC-150SU2M" Ta USB 2.0 .It "EnGenius EUB1200AC" Ta USB 3.0 .It "Hawking HD65U" Ta USB 2.0 .It "Hercules Wireless N USB Pico" Ta USB 2.0 .It "I-O Data WN-AC867U" Ta USB 3.0 .It "Linksys WUSB6300" Ta USB 3.0 .It "NEC AtermWL900U PA-WL900U" Ta USB 3.0 .It "Netgear A6100" Ta USB 2.0 .It "Netgear WNA1000M" Ta USB 2.0 .It "Planex GW-900D" Ta USB 3.0 .It "Realtek RTL8192CU" Ta USB 2.0 .It "Realtek RTL8188CUS" Ta USB 2.0 .It "Sitecom WLA-7100" Ta USB 3.0 .It "TP-Link Archer T4U" Ta USB 3.0 .It "TP-LINK TL-WN723N v3" Ta USB 2.0 .It "TP-LINK TL-WN725N v2" Ta USB 2.0 .It "TP-LINK TL-WN821N v4" Ta USB 2.0 +.It "TP-LINK TL-WN821N v5" Ta USB 2.0 +.It "TP-LINK TL-WN822N v4" Ta USB 2.0 .It "TP-LINK TL-WN823N v1" Ta USB 2.0 +.It "TP-LINK TL-WN823N v2" Ta USB 2.0 .It "TRENDnet TEW-805UB" Ta USB 3.0 .It "ZyXEL NWD6605" Ta USB 3.0 .El .Sh SEE ALSO .Xr rtwn 4 , .Xr rtwn_pci 4 , .Xr rtwnfw 4 , .Xr usb 4 .Sh BUGS The .Nm driver does not support any of the 802.11ac capabilities offered by the adapters. Additional work is required in .Xr ieee80211 9 before those features can be supported. Index: projects/clang400-import/share/man/man4/rtwnfw.4 =================================================================== --- projects/clang400-import/share/man/man4/rtwnfw.4 (revision 312719) +++ projects/clang400-import/share/man/man4/rtwnfw.4 (revision 312720) @@ -1,88 +1,90 @@ .\" Copyright (c) 2015 Kevin Lo .\" Copyright (c) 2016 Andriy Voskoboinyk .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. 
.\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd October 28, 2015 +.Dd January 24, 2017 .Dt RTWNFW 4 .Os .Sh NAME .Nm rtwnfw .Nd "Firmware Module for Realtek Wireless driver" .Sh SYNOPSIS To compile this module into the kernel, place the following line in your kernel configuration file: .Bd -ragged -offset indent .Cd "device rtwnfw" .Ed .Pp This will include all firmware images inside the kernel. If you want to pick only the firmware image for your network adapter choose one of the following: .Bd -ragged -offset indent .Cd "device rtwn-rtl8188eufw" .Cd "device rtwn-rtl8192cfwE_B" .Cd "device rtwn-rtl8192cfwE" .Cd "device rtwn-rtl8192cfwT" .Cd "device rtwn-rtl8192cfwU" +.Cd "device rtwn-rtl8192eufw" .Cd "device rtwn-rtl8812aufw" .Cd "device rtwn-rtl8821aufw" .Ed .Pp Alternatively, to load all firmware images as a module at boot time, place the following line in .Xr loader.conf 5 : .Bd -literal -offset indent rtwn-rtl8188eufw_load="YES" rtwn-rtl8192cfwE_B_load="YES" rtwn-rtl8192cfwE_load="YES" rtwn-rtl8192cfwT_load="YES" rtwn-rtl8192cfwU_load="YES" +rtwn-rtl8192eufw_load="YES" rtwn-rtl8812aufw_load="YES" rtwn-rtl8821aufw_load="YES" .Ed .Sh DESCRIPTION rtwn-rtl8192cfwE and rtl8192cfwE_B modules provide access to firmware sets for the Realtek RTL8188CE chip based PCIe adapters. Other modules provide access to firmware sets for the Realtek RTL8188CUS, -RTL8188CE-VAU, RTL8188EUS, RTL8188RU, RTL8192CU, RTL8812AU and RTL8821AU -chip based USB WiFi adapters. +RTL8188CE-VAU, RTL8188EUS, RTL8188RU, RTL8192CU, RTL8192EU, RTL8812AU and +RTL8821AU chip based USB WiFi adapters. They may be statically linked into the kernel, or loaded as a modules. .Pp For the loaded firmware to be enabled for use the license at .Pa /usr/share/doc/legal/realtek.LICENSE must be agreed to by adding the following line to .Xr loader.conf 5 : .Pp .Dl "legal.realtek.license_ack=1" .Sh FILES .Bl -tag -width ".Pa /usr/share/doc/legal/realtek.LICENSE" -compact .It Pa /usr/share/doc/legal/realtek.LICENSE .Nm firmware license .El .Sh SEE ALSO .Xr rtwn 4 , .Xr firmware 9 Index: projects/clang400-import/share/man/man5/tmpfs.5 =================================================================== --- projects/clang400-import/share/man/man5/tmpfs.5 (revision 312719) +++ projects/clang400-import/share/man/man5/tmpfs.5 (revision 312720) @@ -1,174 +1,178 @@ .\"- .\" Copyright (c) 2007 Xin LI .\" Copyright (c) 2017 The FreeBSD Foundation, Inc. .\" All rights reserved. .\" .\" Part of this documentation was written by .\" Konstantin Belousov under sponsorship .\" from the FreeBSD Foundation. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. 
Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS DOCUMENTATION IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\"- .\" Copyright (c) 2005, 2006 The NetBSD Foundation, Inc. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS .\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED .\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR .\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS .\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd January 17, 2017 +.Dd January 20, 2017 .Dt TMPFS 5 .Os .Sh NAME .Nm tmpfs .Nd "in-memory file system" .Sh SYNOPSIS To compile this driver into the kernel, place the following line in your kernel configuration file: .Bd -ragged -offset indent .Cd "options TMPFS" .Ed .Pp Alternatively, to load the driver as a module at boot time, place the following line in .Xr loader.conf 5 : .Bd -literal -offset indent tmpfs_load="YES" .Ed .Sh DESCRIPTION The .Nm -driver implements in-memory, or +driver implements an in-memory, or .Tn tmpfs file system. The filesystem stores both file metadata and data in main memory. This allows very fast and low latency accesses to the data. The data is volatile. An umount or system reboot invalidates it. These properties make the filesystem's mounts suitable for fast -scratch storage, e.g. +scratch storage, like .Pa /tmp . 
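.Pp
For example, a scratch file system capped at 64 megabytes can be
mounted as follows (the mount point is illustrative; the
.Cm size
option, in bytes, is described below):
.Bd -literal -offset indent
mount -t tmpfs -o size=67108864 tmpfs /mnt/scratch
.Ed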
.Pp If the system becomes low on memory and swap is configured (see .Xr swapon 8 ), -file data may be written to the swap space, freeing memory +the system can transfer file data to swap space, freeing memory for other needs. -The current implementation never swaps out metadata, including -the directory content. +Metadata, including the directory content, is never swapped out by the +current implementation. Keep this in mind when planning the mount limits, especially when expecting to place many small files on a tmpfs mount. .Pp -When a file from a tmpfs mount is mmaped (see -.Xr mmap 2 ) -into the process address space, the swap VM object which manages the file -pages is used to implement mapping and to avoid double-copying of +When +.Xr mmap 2 +is used on a file from a tmpfs mount, the swap VM object managing the +file pages is used to implement mapping and avoid double-copying of the file data. This quirk causes process inspection tools, like .Xr procstat 1 , to report anonymous memory mappings instead of file mappings. .Sh OPTIONS The following options are available when mounting .Nm file systems: .Bl -tag -width "It Cm maxfilesize" .It Cm gid Specifies the group ID of the root inode of the file system. Defaults to the mount point's GID. .It Cm uid Specifies the user ID of the root inode of the file system. Defaults to the mount point's UID. .It Cm mode Specifies the mode (in octal notation) of the root inode of the file system. Defaults to the mount point's mode. +.It Cm nonc +Do not use namecache to resolve names to files for the created mount. +This saves memory, but currently might impair scalability for highly +used mounts on large machines. .It Cm inodes Specifies the maximum number of nodes available to the file system. If not specified, the file system chooses a reasonable maximum based on the file system size, which can be limited with the .Cm size option. .It Cm size Specifies the total file system size in bytes. If zero (the default) or a value larger than SIZE_MAX - PAGE_SIZE is given, the available amount of memory (including main memory and swap space) will be used. .It Cm maxfilesize Specifies the maximum file size in bytes. Defaults to the maximum possible value. .El .Sh EXAMPLES To mount a .Nm memory file system: .Pp .Dl "mount -t tmpfs tmpfs /tmp" .Sh SEE ALSO .Xr procstat 1 , .Xr nmount 2 , .Xr mmap 2 , .Xr unmount 2 , .Xr fstab 5 , .Xr mdmfs 8 , .Xr mount 8 , .Xr swapinfo 8 , .Xr swapon 8 .Sh HISTORY The .Nm driver first appeared in .Fx 7.0 . .Sh AUTHORS .An -nosplit The .Nm kernel implementation was written by .An Julio M. Merino Vidal Aq Mt jmmv@NetBSD.org as a Google Summer of Code project. .Pp .An Rohit Jalan and others ported it from .Nx to .Fx . .Pp This manual page was written by .An Xin LI Aq Mt delphij@FreeBSD.org . Index: projects/clang400-import/sys/amd64/linux/linux_sysvec.c =================================================================== --- projects/clang400-import/sys/amd64/linux/linux_sysvec.c (revision 312719) +++ projects/clang400-import/sys/amd64/linux/linux_sysvec.c (revision 312720) @@ -1,995 +1,990 @@ /*- * Copyright (c) 2013 Dmitry Chagin * Copyright (c) 2004 Tim J. Robbins * Copyright (c) 2003 Peter Wemm * Copyright (c) 2002 Doug Rabson * Copyright (c) 1998-1999 Andrew Gallatin * Copyright (c) 1994-1996 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include "opt_compat.h" #define __ELF_WORD_SIZE 64 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_VERSION(linux64, 1); #if BYTE_ORDER == LITTLE_ENDIAN #define SHELLMAGIC 0x2123 /* #! */ #else #define SHELLMAGIC 0x2321 #endif #if defined(DEBUG) SYSCTL_PROC(_compat_linux, OID_AUTO, debug, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, linux_sysctl_debug, "A", "Linux 64 debugging control"); #endif /* * Allow these functions to use the ldebug() facility * even though they are not syscalls themselves. Map them * to syscall 0. This is slightly less bogus than using * ldebug(sigreturn).
*/ #define LINUX_SYS_linux_rt_sendsig 0 const char *linux_kplatform; static int linux_szsigcode; static vm_object_t linux_shared_page_obj; static char *linux_shared_page_mapping; extern char _binary_linux_locore_o_start; extern char _binary_linux_locore_o_end; extern struct sysent linux_sysent[LINUX_SYS_MAXSYSCALL]; SET_DECLARE(linux_ioctl_handler_set, struct linux_ioctl_handler); static register_t * linux_copyout_strings(struct image_params *imgp); static int elf_linux_fixup(register_t **stack_base, struct image_params *iparams); static boolean_t linux_trans_osrel(const Elf_Note *note, int32_t *osrel); static void linux_vdso_install(void *param); static void linux_vdso_deinstall(void *param); static void linux_set_syscall_retval(struct thread *td, int error); static int linux_fetch_syscall_args(struct thread *td, struct syscall_args *sa); static void linux_exec_setregs(struct thread *td, struct image_params *imgp, u_long stack); static int linux_vsyscall(struct thread *td); /* * Linux syscalls return negative errno's; we do positive and map them * Reference: * FreeBSD: src/sys/sys/errno.h * Linux: linux-2.6.17.8/include/asm-generic/errno-base.h * linux-2.6.17.8/include/asm-generic/errno.h */ static int bsd_to_linux_errno[ELAST + 1] = { -0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -35, -12, -13, -14, -15, -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -29, -30, -31, -32, -33, -34, -11,-115,-114, -88, -89, -90, -91, -92, -93, -94, -95, -96, -97, -98, -99, -100,-101,-102,-103,-104,-105,-106,-107,-108,-109, -110,-111, -40, -36,-112,-113, -39, -11, -87,-122, -116, -66, -6, -6, -6, -6, -6, -37, -38, -9, -6, -6, -43, -42, -75,-125, -84, -95, -16, -74, -72, -67, -71 }; #define LINUX_T_UNKNOWN 255 static int _bsd_to_linux_trapcode[] = { LINUX_T_UNKNOWN, /* 0 */ 6, /* 1 T_PRIVINFLT */ LINUX_T_UNKNOWN, /* 2 */ 3, /* 3 T_BPTFLT */ LINUX_T_UNKNOWN, /* 4 */ LINUX_T_UNKNOWN, /* 5 */ 16, /* 6 T_ARITHTRAP */ 254, /* 7 T_ASTFLT */ LINUX_T_UNKNOWN, /* 8 */ 13, /* 9 T_PROTFLT */ 1, /* 10 T_TRCTRAP */ LINUX_T_UNKNOWN, /* 11 */ 14, /* 12 T_PAGEFLT */ LINUX_T_UNKNOWN, /* 13 */ 17, /* 14 T_ALIGNFLT */ LINUX_T_UNKNOWN, /* 15 */ LINUX_T_UNKNOWN, /* 16 */ LINUX_T_UNKNOWN, /* 17 */ 0, /* 18 T_DIVIDE */ 2, /* 19 T_NMI */ 4, /* 20 T_OFLOW */ 5, /* 21 T_BOUND */ 7, /* 22 T_DNA */ 8, /* 23 T_DOUBLEFLT */ 9, /* 24 T_FPOPFLT */ 10, /* 25 T_TSSFLT */ 11, /* 26 T_SEGNPFLT */ 12, /* 27 T_STKFLT */ 18, /* 28 T_MCHK */ 19, /* 29 T_XMMFLT */ 15 /* 30 T_RESERVED */ }; #define bsd_to_linux_trapcode(code) \ ((code) < nitems(_bsd_to_linux_trapcode) ? \ _bsd_to_linux_trapcode[(code)] : \ LINUX_T_UNKNOWN) LINUX_VDSO_SYM_INTPTR(linux_rt_sigcode); LINUX_VDSO_SYM_CHAR(linux_platform); /* * If FreeBSD & Linux have a difference of opinion about what a trap * means, deal with it here. */ static int translate_traps(int signal, int trap_code) { if (signal != SIGBUS) return (signal); switch (trap_code) { case T_PROTFLT: case T_TSSFLT: case T_DOUBLEFLT: case T_PAGEFLT: return (SIGSEGV); default: return (signal); } } static int linux_fetch_syscall_args(struct thread *td, struct syscall_args *sa) { struct proc *p; struct trapframe *frame; p = td->td_proc; frame = td->td_frame; sa->args[0] = frame->tf_rdi; sa->args[1] = frame->tf_rsi; sa->args[2] = frame->tf_rdx; sa->args[3] = frame->tf_rcx; sa->args[4] = frame->tf_r8; sa->args[5] = frame->tf_r9; sa->code = frame->tf_rax; if (sa->code >= p->p_sysent->sv_size) /* nosys */ sa->callp = &p->p_sysent->sv_table[p->p_sysent->sv_size - 1]; else sa->callp = &p->p_sysent->sv_table[sa->code]; sa->narg = sa->callp->sy_narg; td->td_retval[0] = 0; return (0); } static void linux_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame = td->td_frame; /* * On Linux only %rcx and %r11 values are not preserved across * the syscall. * So, do not clobber %rdx and %r10 */ td->td_retval[1] = frame->tf_rdx; frame->tf_r10 = frame->tf_rcx; cpu_set_syscall_retval(td, error); /* Restore all registers.
*/ set_pcb_flags(td->td_pcb, PCB_FULL_IRET); } static int elf_linux_fixup(register_t **stack_base, struct image_params *imgp) { Elf_Auxargs *args; Elf_Addr *base; Elf_Addr *pos; struct ps_strings *arginfo; struct proc *p; int issetugid; p = imgp->proc; arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings; KASSERT(curthread->td_proc == imgp->proc, ("unsafe elf_linux_fixup(), should be curproc")); base = (Elf64_Addr *)*stack_base; args = (Elf64_Auxargs *)imgp->auxargs; pos = base + (imgp->args->argc + imgp->args->envc + 2); issetugid = p->p_flag & P_SUGID ? 1 : 0; AUXARGS_ENTRY(pos, LINUX_AT_SYSINFO_EHDR, imgp->proc->p_sysent->sv_shared_page_base); AUXARGS_ENTRY(pos, LINUX_AT_HWCAP, cpu_feature); AUXARGS_ENTRY(pos, LINUX_AT_CLKTCK, stclohz); AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); AUXARGS_ENTRY(pos, AT_PHENT, args->phent); AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); AUXARGS_ENTRY(pos, AT_BASE, args->base); AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); AUXARGS_ENTRY(pos, AT_UID, imgp->proc->p_ucred->cr_ruid); AUXARGS_ENTRY(pos, AT_EUID, imgp->proc->p_ucred->cr_svuid); AUXARGS_ENTRY(pos, AT_GID, imgp->proc->p_ucred->cr_rgid); AUXARGS_ENTRY(pos, AT_EGID, imgp->proc->p_ucred->cr_svgid); AUXARGS_ENTRY(pos, LINUX_AT_SECURE, issetugid); AUXARGS_ENTRY(pos, LINUX_AT_PLATFORM, PTROUT(linux_platform)); AUXARGS_ENTRY(pos, LINUX_AT_RANDOM, imgp->canary); if (imgp->execpathp != 0) AUXARGS_ENTRY(pos, LINUX_AT_EXECFN, imgp->execpathp); if (args->execfd != -1) AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); AUXARGS_ENTRY(pos, AT_NULL, 0); free(imgp->auxargs, M_TEMP); imgp->auxargs = NULL; base--; suword(base, (uint64_t)imgp->args->argc); *stack_base = (register_t *)base; return (0); } /* * Copy strings out to the new process address space, constructing new arg * and env vector tables. Return a pointer to the base so that it can be used * as the initial stack pointer. */ static register_t * linux_copyout_strings(struct image_params *imgp) { int argc, envc; char **vectp; char *stringp, *destp; register_t *stack_base; struct ps_strings *arginfo; char canary[LINUX_AT_RANDOM_LEN]; size_t execpath_len; struct proc *p; /* * Calculate string base and vector table pointers. */ if (imgp->execpath != NULL && imgp->auxargs != NULL) execpath_len = strlen(imgp->execpath) + 1; else execpath_len = 0; p = imgp->proc; arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings; destp = (caddr_t)arginfo - SPARE_USRSPACE - roundup(sizeof(canary), sizeof(char *)) - roundup(execpath_len, sizeof(char *)) - roundup(ARG_MAX - imgp->args->stringspace, sizeof(char *)); if (execpath_len != 0) { imgp->execpathp = (uintptr_t)arginfo - execpath_len; copyout(imgp->execpath, (void *)imgp->execpathp, execpath_len); } /* * Prepare the canary for SSP. */ arc4rand(canary, sizeof(canary), 0); imgp->canary = (uintptr_t)arginfo - roundup(execpath_len, sizeof(char *)) - roundup(sizeof(canary), sizeof(char *)); copyout(canary, (void *)imgp->canary, sizeof(canary)); /* * If we have a valid auxargs ptr, prepare some room * on the stack. */ if (imgp->auxargs) { /* * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for * lower compatibility. */ imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size : (LINUX_AT_COUNT * 2); /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets,and imgp->auxarg_size is room * for argument of Runtime loader. 
*/ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2 + imgp->auxarg_size) * sizeof(char *)); } else { /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets */ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) * sizeof(char *)); } /* * vectp also becomes our initial stack base */ stack_base = (register_t *)vectp; stringp = imgp->args->begin_argv; argc = imgp->args->argc; envc = imgp->args->envc; /* * Copy out strings - arguments and environment. */ copyout(stringp, destp, ARG_MAX - imgp->args->stringspace); /* * Fill in "ps_strings" struct for ps, w, etc. */ suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp); suword(&arginfo->ps_nargvstr, argc); /* * Fill in argument portion of vector table. */ for (; argc > 0; --argc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* a null vector table pointer separates the argp's from the envp's */ suword(vectp++, 0); suword(&arginfo->ps_envstr, (long)(intptr_t)vectp); suword(&arginfo->ps_nenvstr, envc); /* * Fill in environment portion of vector table. */ for (; envc > 0; --envc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* end of vector table is a null pointer */ suword(vectp, 0); return (stack_base); } /* * Reset registers to default values on exec. */ static void linux_exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *regs = td->td_frame; struct pcb *pcb = td->td_pcb; mtx_lock(&dt_lock); if (td->td_proc->p_md.md_ldt != NULL) user_ldt_free(td); else mtx_unlock(&dt_lock); pcb->pcb_fsbase = 0; pcb->pcb_gsbase = 0; clear_pcb_flags(pcb, PCB_32BIT); pcb->pcb_initial_fpucw = __LINUX_NPXCW__; set_pcb_flags(pcb, PCB_FULL_IRET); bzero((char *)regs, sizeof(struct trapframe)); regs->tf_rip = imgp->entry_addr; regs->tf_rsp = stack; regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T); regs->tf_ss = _udatasel; regs->tf_cs = _ucodesel; regs->tf_ds = _udatasel; regs->tf_es = _udatasel; regs->tf_fs = _ufssel; regs->tf_gs = _ugssel; regs->tf_flags = TF_HASSEGS; /* * Reset the hardware debug registers if they were in use. * They won't have any meaning for the newly exec'd process. */ if (pcb->pcb_flags & PCB_DBREGS) { pcb->pcb_dr0 = 0; pcb->pcb_dr1 = 0; pcb->pcb_dr2 = 0; pcb->pcb_dr3 = 0; pcb->pcb_dr6 = 0; pcb->pcb_dr7 = 0; if (pcb == curpcb) { /* * Clear the debug registers on the running * CPU, otherwise they will end up affecting * the next process we switch to. */ reset_dbregs(); } clear_pcb_flags(pcb, PCB_DBREGS); } /* * Drop the FP state if we hold it, so that the process gets a * clean FP state if it uses the FPU again. */ fpstate_drop(td); } /* * Copied from amd64/amd64/machdep.c * * XXX fpu state need? don't think so */ int linux_rt_sigreturn(struct thread *td, struct linux_rt_sigreturn_args *args) { struct proc *p; struct l_ucontext uc; struct l_sigcontext *context; struct trapframe *regs; unsigned long rflags; int error; ksiginfo_t ksi; regs = td->td_frame; error = copyin((void *)regs->tf_rbx, &uc, sizeof(uc)); if (error != 0) return (error); p = td->td_proc; context = &uc.uc_mcontext; rflags = context->sc_rflags; /* * Don't allow users to change privileged or reserved flags. */ /* * XXX do allow users to change the privileged flag PSL_RF. * The cpu sets PSL_RF in tf_rflags for faults. Debuggers * should sometimes set it there too. 
tf_rflags is kept in * the signal context during signal handling and there is no * other place to remember it, so the PSL_RF bit may be * corrupted by the signal handler without us knowing. * Corruption of the PSL_RF bit at worst causes one more or * one less debugger trap, so allowing it is fairly harmless. */ #define RFLAG_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) if (!RFLAG_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) { printf("linux_rt_sigreturn: rflags = 0x%lx\n", rflags); return (EINVAL); } /* * Don't allow users to load a valid privileged %cs. Let the * hardware check for invalid selectors, excess privilege in * other selectors, invalid %eip's and invalid %esp's. */ #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) if (!CS_SECURE(context->sc_cs)) { printf("linux_rt_sigreturn: cs = 0x%x\n", context->sc_cs); ksiginfo_init_trap(&ksi); ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_OBJERR; ksi.ksi_trapno = T_PROTFLT; ksi.ksi_addr = (void *)regs->tf_rip; trapsignal(td, &ksi); return (EINVAL); } PROC_LOCK(p); linux_to_bsd_sigset(&uc.uc_sigmask, &td->td_sigmask); SIG_CANTMASK(td->td_sigmask); signotify(td); PROC_UNLOCK(p); regs->tf_rdi = context->sc_rdi; regs->tf_rsi = context->sc_rsi; regs->tf_rdx = context->sc_rdx; regs->tf_rbp = context->sc_rbp; regs->tf_rbx = context->sc_rbx; regs->tf_rcx = context->sc_rcx; regs->tf_rax = context->sc_rax; regs->tf_rip = context->sc_rip; regs->tf_rsp = context->sc_rsp; regs->tf_r8 = context->sc_r8; regs->tf_r9 = context->sc_r9; regs->tf_r10 = context->sc_r10; regs->tf_r11 = context->sc_r11; regs->tf_r12 = context->sc_r12; regs->tf_r13 = context->sc_r13; regs->tf_r14 = context->sc_r14; regs->tf_r15 = context->sc_r15; regs->tf_cs = context->sc_cs; regs->tf_err = context->sc_err; regs->tf_rflags = rflags; set_pcb_flags(td->td_pcb, PCB_FULL_IRET); return (EJUSTRETURN); } /* * copied from amd64/amd64/machdep.c * * Send an interrupt to process. */ static void linux_rt_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct l_rt_sigframe sf, *sfp; struct proc *p; struct thread *td; struct sigacts *psp; caddr_t sp; struct trapframe *regs; int sig, code; int oonstack; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; code = ksi->ksi_code; mtx_assert(&psp->ps_mtx, MA_OWNED); regs = td->td_frame; oonstack = sigonstack(regs->tf_rsp); LINUX_CTR4(rt_sendsig, "%p, %d, %p, %u", catcher, sig, mask, code); /* Allocate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sp = (caddr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size - sizeof(struct l_rt_sigframe); } else sp = (caddr_t)regs->tf_rsp - sizeof(struct l_rt_sigframe) - 128; /* Align to 16 bytes. */ sfp = (struct l_rt_sigframe *)((unsigned long)sp & ~0xFul); mtx_unlock(&psp->ps_mtx); /* Translate the signal. */ sig = bsd_to_linux_signal(sig); /* Save user context. */ bzero(&sf, sizeof(sf)); bsd_to_linux_sigset(mask, &sf.sf_sc.uc_sigmask); bsd_to_linux_sigset(mask, &sf.sf_sc.uc_mcontext.sc_mask); sf.sf_sc.uc_stack.ss_sp = PTROUT(td->td_sigstk.ss_sp); sf.sf_sc.uc_stack.ss_size = td->td_sigstk.ss_size; sf.sf_sc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? 
LINUX_SS_ONSTACK : 0) : LINUX_SS_DISABLE; PROC_UNLOCK(p); sf.sf_sc.uc_mcontext.sc_rdi = regs->tf_rdi; sf.sf_sc.uc_mcontext.sc_rsi = regs->tf_rsi; sf.sf_sc.uc_mcontext.sc_rdx = regs->tf_rdx; sf.sf_sc.uc_mcontext.sc_rbp = regs->tf_rbp; sf.sf_sc.uc_mcontext.sc_rbx = regs->tf_rbx; sf.sf_sc.uc_mcontext.sc_rcx = regs->tf_rcx; sf.sf_sc.uc_mcontext.sc_rax = regs->tf_rax; sf.sf_sc.uc_mcontext.sc_rip = regs->tf_rip; sf.sf_sc.uc_mcontext.sc_rsp = regs->tf_rsp; sf.sf_sc.uc_mcontext.sc_r8 = regs->tf_r8; sf.sf_sc.uc_mcontext.sc_r9 = regs->tf_r9; sf.sf_sc.uc_mcontext.sc_r10 = regs->tf_r10; sf.sf_sc.uc_mcontext.sc_r11 = regs->tf_r11; sf.sf_sc.uc_mcontext.sc_r12 = regs->tf_r12; sf.sf_sc.uc_mcontext.sc_r13 = regs->tf_r13; sf.sf_sc.uc_mcontext.sc_r14 = regs->tf_r14; sf.sf_sc.uc_mcontext.sc_r15 = regs->tf_r15; sf.sf_sc.uc_mcontext.sc_cs = regs->tf_cs; sf.sf_sc.uc_mcontext.sc_rflags = regs->tf_rflags; sf.sf_sc.uc_mcontext.sc_err = regs->tf_err; sf.sf_sc.uc_mcontext.sc_trapno = bsd_to_linux_trapcode(code); sf.sf_sc.uc_mcontext.sc_cr2 = (register_t)ksi->ksi_addr; /* Build the argument list for the signal handler. */ regs->tf_rdi = sig; /* arg 1 in %rdi */ regs->tf_rax = 0; regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */ regs->tf_rdx = (register_t)&sfp->sf_sc; /* arg 3 in %rdx */ sf.sf_handler = catcher; /* Fill in POSIX parts */ ksiginfo_to_lsiginfo(ksi, &sf.sf_si, sig); /* * Copy the sigframe out to the user's stack. */ if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { #ifdef DEBUG printf("process %ld has trashed its stack\n", (long)p->p_pid); #endif PROC_LOCK(p); sigexit(td, SIGILL); } regs->tf_rsp = (long)sfp; regs->tf_rip = linux_rt_sigcode; regs->tf_rflags &= ~(PSL_T | PSL_D); regs->tf_cs = _ucodesel; set_pcb_flags(td->td_pcb, PCB_FULL_IRET); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } /* * If a linux binary is exec'ing something, try this image activator * first. We override standard shell script execution in order to * be able to modify the interpreter path. We only do this if a linux * binary is doing the exec, so we do not create an EXEC module for it. */ static int exec_linux_imgact_try(struct image_params *iparams); static int exec_linux_imgact_try(struct image_params *imgp) { const char *head = (const char *)imgp->image_header; char *rpath; - int error = -1, len; + int error = -1; /* * The interpreter for shell scripts run from a linux binary needs * to be located in /compat/linux if possible in order to recursively * maintain linux path emulation. */ if (((const short *)head)[0] == SHELLMAGIC) { /* * Run our normal shell image activator. If it succeeds * attempt to use the alternate path for the interpreter. * If an alternate path is found, use our stringspace * to store it. 
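* (For example, a script whose first line is "#!/bin/sh", when run by a * Linux binary and with /compat/linux/bin/sh present, ends up with that * translated path as its interpreter; the concrete path is only an * illustration, not taken from the code.)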
*/ if ((error = exec_shell_imgact(imgp)) == 0) { linux_emul_convpath(FIRST_THREAD_IN_PROC(imgp->proc), imgp->interpreter_name, UIO_SYSSPACE, &rpath, 0, AT_FDCWD); - if (rpath != NULL) { - len = strlen(rpath) + 1; - - if (len <= MAXSHELLCMDLEN) - memcpy(imgp->interpreter_name, - rpath, len); - free(rpath, M_TEMP); - } + if (rpath != NULL) + imgp->args->fname_buf = + imgp->interpreter_name = rpath; } } - return(error); + return (error); } #define LINUX_VSYSCALL_START (-10UL << 20) #define LINUX_VSYSCALL_SZ 1024 const unsigned long linux_vsyscall_vector[] = { LINUX_SYS_gettimeofday, LINUX_SYS_linux_time, /* getcpu not implemented */ }; static int linux_vsyscall(struct thread *td) { struct trapframe *frame; uint64_t retqaddr; int code, traced; int error; frame = td->td_frame; /* Check %rip for vsyscall area */ if (__predict_true(frame->tf_rip < LINUX_VSYSCALL_START)) return (EINVAL); if ((frame->tf_rip & (LINUX_VSYSCALL_SZ - 1)) != 0) return (EINVAL); code = (frame->tf_rip - LINUX_VSYSCALL_START) / LINUX_VSYSCALL_SZ; if (code >= nitems(linux_vsyscall_vector)) return (EINVAL); /* * vsyscall called as callq *(%rax), so we must * use return address from %rsp and also fixup %rsp */ error = copyin((void *)frame->tf_rsp, &retqaddr, sizeof(retqaddr)); if (error) return (error); frame->tf_rip = retqaddr; frame->tf_rax = linux_vsyscall_vector[code]; frame->tf_rsp += 8; traced = (frame->tf_flags & PSL_T); amd64_syscall(td, traced); return (0); } struct sysentvec elf_linux_sysvec = { .sv_size = LINUX_SYS_MAXSYSCALL, .sv_table = linux_sysent, .sv_mask = 0, .sv_errsize = ELAST + 1, .sv_errtbl = bsd_to_linux_errno, .sv_transtrap = translate_traps, .sv_fixup = elf_linux_fixup, .sv_sendsig = linux_rt_sendsig, .sv_sigcode = &_binary_linux_locore_o_start, .sv_szsigcode = &linux_szsigcode, .sv_name = "Linux ELF64", .sv_coredump = elf64_coredump, .sv_imgact_try = exec_linux_imgact_try, .sv_minsigstksz = LINUX_MINSIGSTKSZ, .sv_pagesize = PAGE_SIZE, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_stackprot = VM_PROT_ALL, .sv_copyout_strings = linux_copyout_strings, .sv_setregs = linux_exec_setregs, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = SV_ABI_LINUX | SV_LP64 | SV_SHP, .sv_set_syscall_retval = linux_set_syscall_retval, .sv_fetch_syscall_args = linux_fetch_syscall_args, .sv_syscallnames = NULL, .sv_shared_page_base = SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = linux_schedtail, .sv_thread_detach = linux_thread_detach, .sv_trap = linux_vsyscall, }; static void linux_vdso_install(void *param) { linux_szsigcode = (&_binary_linux_locore_o_end - &_binary_linux_locore_o_start); if (linux_szsigcode > elf_linux_sysvec.sv_shared_page_len) panic("Linux invalid vdso size\n"); __elfN(linux_vdso_fixup)(&elf_linux_sysvec); linux_shared_page_obj = __elfN(linux_shared_page_init) (&linux_shared_page_mapping); __elfN(linux_vdso_reloc)(&elf_linux_sysvec, SHAREDPAGE); bcopy(elf_linux_sysvec.sv_sigcode, linux_shared_page_mapping, linux_szsigcode); elf_linux_sysvec.sv_shared_page_obj = linux_shared_page_obj; linux_kplatform = linux_shared_page_mapping + (linux_platform - (caddr_t)SHAREDPAGE); } SYSINIT(elf_linux_vdso_init, SI_SUB_EXEC, SI_ORDER_ANY, (sysinit_cfunc_t)linux_vdso_install, NULL); static void linux_vdso_deinstall(void *param) { __elfN(linux_shared_page_fini)(linux_shared_page_obj); }; SYSUNINIT(elf_linux_vdso_uninit, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)linux_vdso_deinstall, NULL); static char GNULINUX_ABI_VENDOR[] 
= "GNU"; static int GNULINUX_ABI_DESC = 0; static boolean_t linux_trans_osrel(const Elf_Note *note, int32_t *osrel) { const Elf32_Word *desc; uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, sizeof(Elf32_Addr)); desc = (const Elf32_Word *)p; if (desc[0] != GNULINUX_ABI_DESC) return (FALSE); /* * For linux we encode osrel as follows (see linux_mib.c): * VVVMMMIII (version, major, minor), see linux_mib.c. */ *osrel = desc[1] * 1000000 + desc[2] * 1000 + desc[3]; return (TRUE); } static Elf_Brandnote linux64_brandnote = { .hdr.n_namesz = sizeof(GNULINUX_ABI_VENDOR), .hdr.n_descsz = 16, .hdr.n_type = 1, .vendor = GNULINUX_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = linux_trans_osrel }; static Elf64_Brandinfo linux_glibc2brand = { .brand = ELFOSABI_LINUX, .machine = EM_X86_64, .compat_3_brand = "Linux", .emul_path = "/compat/linux", .interp_path = "/lib64/ld-linux-x86-64.so.2", .sysvec = &elf_linux_sysvec, .interp_newpath = NULL, .brand_note = &linux64_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE }; static Elf64_Brandinfo linux_glibc2brandshort = { .brand = ELFOSABI_LINUX, .machine = EM_X86_64, .compat_3_brand = "Linux", .emul_path = "/compat/linux", .interp_path = "/lib64/ld-linux.so.2", .sysvec = &elf_linux_sysvec, .interp_newpath = NULL, .brand_note = &linux64_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE }; Elf64_Brandinfo *linux_brandlist[] = { &linux_glibc2brand, &linux_glibc2brandshort, NULL }; static int linux64_elf_modevent(module_t mod, int type, void *data) { Elf64_Brandinfo **brandinfo; int error; struct linux_ioctl_handler **lihp; error = 0; switch(type) { case MOD_LOAD: for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL; ++brandinfo) if (elf64_insert_brand_entry(*brandinfo) < 0) error = EINVAL; if (error == 0) { SET_FOREACH(lihp, linux_ioctl_handler_set) linux_ioctl_register_handler(*lihp); LIST_INIT(&futex_list); mtx_init(&futex_mtx, "ftllk64", NULL, MTX_DEF); stclohz = (stathz ? stathz : hz); if (bootverbose) printf("Linux x86-64 ELF exec handler installed\n"); } else printf("cannot insert Linux x86-64 ELF brand handler\n"); break; case MOD_UNLOAD: for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL; ++brandinfo) if (elf64_brand_inuse(*brandinfo)) error = EBUSY; if (error == 0) { for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL; ++brandinfo) if (elf64_remove_brand_entry(*brandinfo) < 0) error = EINVAL; } if (error == 0) { SET_FOREACH(lihp, linux_ioctl_handler_set) linux_ioctl_unregister_handler(*lihp); mtx_destroy(&futex_mtx); if (bootverbose) printf("Linux ELF exec handler removed\n"); } else printf("Could not deinstall ELF interpreter entry\n"); break; default: return (EOPNOTSUPP); } return (error); } static moduledata_t linux64_elf_mod = { "linux64elf", linux64_elf_modevent, 0 }; DECLARE_MODULE_TIED(linux64elf, linux64_elf_mod, SI_SUB_EXEC, SI_ORDER_ANY); MODULE_DEPEND(linux64elf, linux_common, 1, 1, 1); FEATURE(linux64, "Linux 64bit support"); Index: projects/clang400-import/sys/arm/freescale/imx/imx_machdep.c =================================================================== --- projects/clang400-import/sys/arm/freescale/imx/imx_machdep.c (revision 312719) +++ projects/clang400-import/sys/arm/freescale/imx/imx_machdep.c (revision 312720) @@ -1,105 +1,112 @@ /*- * Copyright (c) 2013 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, imx, CTLFLAG_RW, NULL, "i.MX container"); static int last_reset_status; SYSCTL_UINT(_hw_imx, OID_AUTO, last_reset_status, CTLFLAG_RD, &last_reset_status, 0, "Last reset status register"); SYSCTL_STRING(_hw_imx, OID_AUTO, last_reset_reason, CTLFLAG_RD, "unknown", 0, "Last reset reason"); /* * This code, which manipulates the watchdog hardware, is here to implement * cpu_reset() because the watchdog is the only way for software to reset the * chip. Why here and not in imx_wdog.c? Because there's no requirement that * the watchdog driver be compiled in, but it's nice to be able to reboot even * if it's not. */ void imx_wdog_cpu_reset(vm_offset_t wdcr_physaddr) { volatile uint16_t * pcr; /* * Trigger an immediate reset by clearing the SRS bit in the watchdog * control register. The reset happens on the next cycle of the wdog * 32KHz clock, so hang out in a spin loop until the reset takes effect. + * + * The i.MX6 erratum ERR004346 says the SRS bit has to be cleared twice + * within the same cycle of the 32kHz clock to reliably trigger the + * reset. Writing it three times in a row ensures that at least two of + * the writes happen in the same 32kHz clock cycle. */ if ((pcr = devmap_ptov(wdcr_physaddr, sizeof(*pcr))) == NULL) { printf("cpu_reset() can't find its control register... 
locking up now."); } else { + *pcr &= ~WDOG_CR_SRS; + *pcr &= ~WDOG_CR_SRS; *pcr &= ~WDOG_CR_SRS; } for (;;) continue; } void imx_wdog_init_last_reset(vm_offset_t wdsr_phys) { volatile uint16_t * psr; if ((psr = devmap_ptov(wdsr_phys, sizeof(*psr))) == NULL) return; last_reset_status = *psr; if (last_reset_status & WDOG_RSR_SFTW) { sysctl___hw_imx_last_reset_reason.oid_arg1 = "SoftwareReset"; } else if (last_reset_status & WDOG_RSR_TOUT) { sysctl___hw_imx_last_reset_reason.oid_arg1 = "WatchdogTimeout"; } else if (last_reset_status & WDOG_RSR_POR) { sysctl___hw_imx_last_reset_reason.oid_arg1 = "PowerOnReset"; } } u_int imx_soc_family(void) { return (imx_soc_type() >> IMXSOC_FAMSHIFT); } Index: projects/clang400-import/sys/arm/ti/cpsw/if_cpsw.c =================================================================== --- projects/clang400-import/sys/arm/ti/cpsw/if_cpsw.c (revision 312719) +++ projects/clang400-import/sys/arm/ti/cpsw/if_cpsw.c (revision 312720) @@ -1,2968 +1,2987 @@ /*- * Copyright (c) 2012 Damjan Marion * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * TI Common Platform Ethernet Switch (CPSW) Driver * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. * * This controller is documented in the AM335x Technical Reference * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. * * It is basically a single Ethernet port (port 0) wired internally to * a 3-port store-and-forward switch connected to two independent * "sliver" controllers (port 1 and port 2). You can operate the * controller in a variety of different ways by suitably configuring * the slivers and the Address Lookup Engine (ALE) that routes packets * between the ports. * * This code was developed and tested on a BeagleBone with * an AM335x SoC. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_cpsw.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CPSW_ETHERSWITCH #include #include "etherswitch_if.h" #endif #include "if_cpswreg.h" #include "if_cpswvar.h" #include "miibus_if.h" /* Device probe/attach/detach. */ static int cpsw_probe(device_t); static int cpsw_attach(device_t); static int cpsw_detach(device_t); static int cpswp_probe(device_t); static int cpswp_attach(device_t); static int cpswp_detach(device_t); static phandle_t cpsw_get_node(device_t, device_t); /* Device Init/shutdown. */ static int cpsw_shutdown(device_t); static void cpswp_init(void *); static void cpswp_init_locked(void *); static void cpswp_stop_locked(struct cpswp_softc *); /* Device Suspend/Resume. */ static int cpsw_suspend(device_t); static int cpsw_resume(device_t); /* Ioctl. */ static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data); static int cpswp_miibus_readreg(device_t, int phy, int reg); static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); static void cpswp_miibus_statchg(device_t); /* Send/Receive packets. */ static void cpsw_intr_rx(void *arg); static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); static void cpsw_rx_enqueue(struct cpsw_softc *); static void cpswp_start(struct ifnet *); static void cpsw_intr_tx(void *); static void cpswp_tx_enqueue(struct cpswp_softc *); static int cpsw_tx_dequeue(struct cpsw_softc *); /* Misc interrupts and watchdog. */ static void cpsw_intr_rx_thresh(void *); static void cpsw_intr_misc(void *); static void cpswp_tick(void *); static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int cpswp_ifmedia_upd(struct ifnet *); static void cpsw_tx_watchdog(void *); /* ALE support */ static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); static void cpsw_ale_dump_table(struct cpsw_softc *); static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, int); static int cpswp_ale_update_addresses(struct cpswp_softc *, int); /* Statistics and sysctls. */ static void cpsw_add_sysctls(struct cpsw_softc *); static void cpsw_stats_collect(struct cpsw_softc *); static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); #ifdef CPSW_ETHERSWITCH static etherswitch_info_t *cpsw_getinfo(device_t); static int cpsw_getport(device_t, etherswitch_port_t *); static int cpsw_setport(device_t, etherswitch_port_t *); static int cpsw_getconf(device_t, etherswitch_conf_t *); static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *); static int cpsw_readreg(device_t, int); static int cpsw_writereg(device_t, int, int); static int cpsw_readphy(device_t, int, int); static int cpsw_writephy(device_t, int, int, int); #endif /* * Arbitrary limit on number of segments in an mbuf to be transmitted. * Packets with more segments than this will be defragmented before * they are queued. */ #define CPSW_TXFRAGS 16 /* Shared resources. 
*/ static device_method_t cpsw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpsw_probe), DEVMETHOD(device_attach, cpsw_attach), DEVMETHOD(device_detach, cpsw_detach), DEVMETHOD(device_shutdown, cpsw_shutdown), DEVMETHOD(device_suspend, cpsw_suspend), DEVMETHOD(device_resume, cpsw_resume), /* Bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, cpsw_get_node), #ifdef CPSW_ETHERSWITCH /* etherswitch interface */ DEVMETHOD(etherswitch_getinfo, cpsw_getinfo), DEVMETHOD(etherswitch_readreg, cpsw_readreg), DEVMETHOD(etherswitch_writereg, cpsw_writereg), DEVMETHOD(etherswitch_readphyreg, cpsw_readphy), DEVMETHOD(etherswitch_writephyreg, cpsw_writephy), DEVMETHOD(etherswitch_getport, cpsw_getport), DEVMETHOD(etherswitch_setport, cpsw_setport), DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup), DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup), DEVMETHOD(etherswitch_getconf, cpsw_getconf), #endif DEVMETHOD_END }; static driver_t cpsw_driver = { "cpswss", cpsw_methods, sizeof(struct cpsw_softc), }; static devclass_t cpsw_devclass; DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0); /* Port/Slave resources. */ static device_method_t cpswp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpswp_probe), DEVMETHOD(device_attach, cpswp_attach), DEVMETHOD(device_detach, cpswp_detach), /* MII interface */ DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), DEVMETHOD_END }; static driver_t cpswp_driver = { "cpsw", cpswp_methods, sizeof(struct cpswp_softc), }; static devclass_t cpswp_devclass; #ifdef CPSW_ETHERSWITCH DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, etherswitch_devclass, 0, 0); MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1); #endif DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0); DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(cpsw, ether, 1, 1, 1); MODULE_DEPEND(cpsw, miibus, 1, 1, 1); #ifdef CPSW_ETHERSWITCH static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS]; #endif static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; static struct resource_spec irq_res_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct { void (*cb)(void *); } cpsw_intr_cb[] = { { cpsw_intr_rx_thresh }, { cpsw_intr_rx }, { cpsw_intr_tx }, { cpsw_intr_misc }, }; /* Number of entries here must match size of stats * array in struct cpswp_softc. 
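* (cpsw_stats_collect() walks this table to fold the hardware counters * into the softc, and cpswp_stop_locked() captures them just before the * controller is reset so the totals are not lost.)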
*/ static struct cpsw_stat { int reg; char *oid; } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { {0x00, "GoodRxFrames"}, {0x04, "BroadcastRxFrames"}, {0x08, "MulticastRxFrames"}, {0x0C, "PauseRxFrames"}, {0x10, "RxCrcErrors"}, {0x14, "RxAlignErrors"}, {0x18, "OversizeRxFrames"}, {0x1c, "RxJabbers"}, {0x20, "ShortRxFrames"}, {0x24, "RxFragments"}, {0x30, "RxOctets"}, {0x34, "GoodTxFrames"}, {0x38, "BroadcastTxFrames"}, {0x3c, "MulticastTxFrames"}, {0x40, "PauseTxFrames"}, {0x44, "DeferredTxFrames"}, {0x48, "CollisionsTxFrames"}, {0x4c, "SingleCollisionTxFrames"}, {0x50, "MultipleCollisionTxFrames"}, {0x54, "ExcessiveCollisions"}, {0x58, "LateCollisions"}, {0x5c, "TxUnderrun"}, {0x60, "CarrierSenseErrors"}, {0x64, "TxOctets"}, {0x68, "RxTx64OctetFrames"}, {0x6c, "RxTx65to127OctetFrames"}, {0x70, "RxTx128to255OctetFrames"}, {0x74, "RxTx256to511OctetFrames"}, {0x78, "RxTx512to1024OctetFrames"}, {0x7c, "RxTx1024upOctetFrames"}, {0x80, "NetOctets"}, {0x84, "RxStartOfFrameOverruns"}, {0x88, "RxMiddleOfFrameOverruns"}, {0x8c, "RxDmaOverruns"} }; /* * Basic debug support. */ static void cpsw_debugf_head(const char *funcname) { int t = (int)(time_second % (24 * 60 * 60)); printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); } static void cpsw_debugf(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } #define CPSW_DEBUGF(_sc, a) do { \ if ((_sc)->debug) { \ cpsw_debugf_head(__func__); \ cpsw_debugf a; \ } \ } while (0) /* * Locking macros */ #define CPSW_TX_LOCK(sc) do { \ mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->tx.lock); \ } while (0) #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) #define CPSW_RX_LOCK(sc) do { \ mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->rx.lock); \ } while (0) #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) #define CPSW_PORT_LOCK(_sc) do { \ mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ mtx_lock(&(_sc)->lock); \ } while (0) #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) /* * Read/Write macros */ #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) #define cpsw_write_4(_sc, _reg, _val) \ bus_write_4((_sc)->mem_res, (_reg), (_val)) #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) #define cpsw_cpdma_bd_paddr(sc, slot) \ BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) #define cpsw_cpdma_read_bd(sc, slot, val) \ bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd(sc, slot, val) \ bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) #define cpsw_cpdma_write_bd_flags(sc, slot, val) \ bus_write_2(sc->mem_res, slot->bd_offset + 14, val) #define cpsw_cpdma_read_bd_flags(sc, slot) \ bus_read_2(sc->mem_res, slot->bd_offset + 14) #define cpsw_write_hdp_slot(sc, queue, slot) \ cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) #define cpsw_read_cp(sc, queue) \ cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) #define cpsw_write_cp(sc, queue, val) \ cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) #define cpsw_write_cp_slot(sc, queue, slot) \ cpsw_write_cp(sc, queue, 
cpsw_cpdma_bd_paddr(sc, slot)) #if 0 /* XXX temporary function versions for debugging. */ static void cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t reg = queue->hdp_offset; uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); cpsw_write_4(sc, reg, v); } static void cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); cpsw_write_cp(sc, queue, v); } #endif /* * Expanded dump routines for verbose debugging. */ static void cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", "Port0"}; struct cpsw_cpdma_bd bd; const char *sep; int i; cpsw_cpdma_read_bd(sc, slot, &bd); printf("BD Addr : 0x%08x Next : 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); printf(" Flags: "); sep = ""; for (i = 0; i < 16; ++i) { if (bd.flags & (1 << (15 - i))) { printf("%s%s", sep, flags[i]); sep = ","; } } printf("\n"); if (slot->mbuf) { printf(" Ether: %14D\n", (char *)(slot->mbuf->m_data), " "); printf(" Packet: %16D\n", (char *)(slot->mbuf->m_data) + 14, " "); } } #define CPSW_DUMP_SLOT(cs, slot) do { \ IF_DEBUG(sc) { \ cpsw_dump_slot(sc, slot); \ } \ } while (0) static void cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) { struct cpsw_slot *slot; int i = 0; int others = 0; STAILQ_FOREACH(slot, q, next) { if (i > CPSW_TXFRAGS) ++others; else cpsw_dump_slot(sc, slot); ++i; } if (others) printf(" ... and %d more.\n", others); printf("\n"); } #define CPSW_DUMP_QUEUE(sc, q) do { \ IF_DEBUG(sc) { \ cpsw_dump_queue(sc, q); \ } \ } while (0) static void cpsw_init_slots(struct cpsw_softc *sc) { struct cpsw_slot *slot; int i; STAILQ_INIT(&sc->avail); /* Put the slot descriptors onto the global avail list. */ for (i = 0; i < nitems(sc->_slots); i++) { slot = &sc->_slots[i]; slot->bd_offset = cpsw_cpdma_bd_offset(i); STAILQ_INSERT_TAIL(&sc->avail, slot, next); } } static int cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) { const int max_slots = nitems(sc->_slots); struct cpsw_slot *slot; int i; if (requested < 0) requested = max_slots; for (i = 0; i < requested; ++i) { slot = STAILQ_FIRST(&sc->avail); if (slot == NULL) return (0); if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { device_printf(sc->dev, "failed to create dmamap\n"); return (ENOMEM); } STAILQ_REMOVE_HEAD(&sc->avail, next); STAILQ_INSERT_TAIL(&queue->avail, slot, next); ++queue->avail_queue_len; ++queue->queue_slots; } return (0); } static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { int error; if (slot->dmamap) { if (slot->mbuf) bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); KASSERT(error == 0, ("Mapping still active")); slot->dmamap = NULL; } if (slot->mbuf) { m_freem(slot->mbuf); slot->mbuf = NULL; } } static void cpsw_reset(struct cpsw_softc *sc) { int i; callout_stop(&sc->watchdog.callout); /* Reset RMII/RGMII wrapper. 
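* Note that each soft-reset register written in this function is * self-clearing: the hardware holds bit 0 at 1 until the corresponding * reset completes, which is why every write below is followed by a spin * loop.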
*/ cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) ; /* Disable TX and RX interrupts for all cores. */ for (i = 0; i < 3; ++i) { cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); } /* Reset CPSW subsystem. */ cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) ; /* Reset sliver ports 1 and 2 */ for (i = 0; i < 2; i++) { /* Reset */ cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) ; } /* Reset DMA controller. */ cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) ; /* Disable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); /* Clear all queues. */ for (i = 0; i < 8; i++) { cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); } /* Clear all interrupt masks */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); } static void cpsw_init(struct cpsw_softc *sc) { struct cpsw_slot *slot; uint32_t reg; /* Disable the interrupt pacing. */ reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg); /* Clear ALE */ cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); /* Enable ALE */ reg = CPSW_ALE_CTL_ENABLE; if (sc->dualemac) reg |= CPSW_ALE_CTL_VLAN_AWARE; cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); /* Set Host Port Mapping. */ cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); /* Initialize ALE: set host port to forwarding (3). */ cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); cpsw_write_4(sc, CPSW_SS_PTYPE, 0); /* Enable statistics for ports 0, 1 and 2 */ cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); /* Turn off flow control. */ cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); /* Offset RX data by 2 bytes so the IP header ends up 4-byte aligned */ cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); /* Initialize RX Buffer Descriptors */ cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); /* Enable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); /* Enable Interrupts for core 0 */ cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); /* Enable host Error Interrupt */ cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); /* Enable interrupts for RX and TX on Channel 0 */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0)); cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1); /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); /* Select MII in GMII_SEL, Internal Delay mode */ //ti_scm_reg_write_4(0x650, 0); /* Initialize active queues. 
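* Writing the physical address of a chain's first buffer descriptor to a * queue's head descriptor pointer (HDP) register hands that chain to the * DMA engine; see the cpsw_write_hdp_slot() macro above.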
*/ slot = STAILQ_FIRST(&sc->tx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->tx, slot); slot = STAILQ_FIRST(&sc->rx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->rx, slot); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS); /* Activate network interface. */ sc->rx.running = 1; sc->tx.running = 1; sc->watchdog.timer = 0; callout_init(&sc->watchdog.callout, 0); callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * Device Probe, Attach, Detach. * */ static int cpsw_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,cpsw")) return (ENXIO); device_set_desc(dev, "3-port Switch Ethernet Subsystem"); return (BUS_PROBE_DEFAULT); } static int cpsw_intr_attach(struct cpsw_softc *sc) { int i; for (i = 0; i < CPSW_INTR_COUNT; i++) { if (bus_setup_intr(sc->dev, sc->irq_res[i], INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) { return (-1); } } return (0); } static void cpsw_intr_detach(struct cpsw_softc *sc) { int i; for (i = 0; i < CPSW_INTR_COUNT; i++) { if (sc->ih_cookie[i]) { bus_teardown_intr(sc->dev, sc->irq_res[i], sc->ih_cookie[i]); } } } static int cpsw_get_fdt_data(struct cpsw_softc *sc, int port) { char *name; int len, phy, vlan; pcell_t phy_id[3], vlan_id; phandle_t child; unsigned long mdio_child_addr; /* Find any slave with phy_id */ phy = -1; vlan = -1; for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0) continue; if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) { OF_prop_free(name); continue; } OF_prop_free(name); if (mdio_child_addr != slave_mdio_addr[port]) continue; len = OF_getproplen(child, "phy_id"); if (len / sizeof(pcell_t) == 2) { /* Get phy address from fdt */ if (OF_getencprop(child, "phy_id", phy_id, len) > 0) phy = phy_id[1]; } len = OF_getproplen(child, "dual_emac_res_vlan"); if (len / sizeof(pcell_t) == 1) { /* Get phy address from fdt */ if (OF_getencprop(child, "dual_emac_res_vlan", &vlan_id, len) > 0) { vlan = vlan_id; } } break; } if (phy == -1) return (ENXIO); sc->port[port].phy = phy; sc->port[port].vlan = vlan; return (0); } static int cpsw_attach(device_t dev) { int error, i; struct cpsw_softc *sc; uint32_t reg; sc = device_get_softc(dev); sc->dev = dev; sc->node = ofw_bus_get_node(dev); getbinuptime(&sc->attach_uptime); if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, sizeof(sc->active_slave)) <= 0) { sc->active_slave = 0; } if (sc->active_slave > 1) sc->active_slave = 1; if (OF_hasprop(sc->node, "dual_emac")) sc->dualemac = 1; for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; if (cpsw_get_fdt_data(sc, i) != 0) { device_printf(dev, "failed to get PHY address from FDT\n"); return (ENXIO); } } /* Initialize mutexes */ mtx_init(&sc->tx.lock, device_get_nameunit(dev), "cpsw TX lock", MTX_DEF); mtx_init(&sc->rx.lock, device_get_nameunit(dev), "cpsw RX lock", MTX_DEF); /* Allocate IRQ resources */ error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); if (error) { device_printf(dev, "could not allocate IRQ resources\n"); cpsw_detach(dev); return (ENXIO); } sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(sc->dev, "failed to allocate memory resource\n"); cpsw_detach(dev); return (ENXIO); } reg = cpsw_read_4(sc, 
CPSW_SS_IDVER); device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), reg & 0xFF, (reg >> 11) & 0x1F); cpsw_add_sysctls(sc); /* Allocate a busdma tag and DMA safe memory for mbufs. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->mbuf_dtag); /* dmatag */ if (error) { device_printf(dev, "bus_dma_tag_create failed\n"); cpsw_detach(dev); return (error); } /* Allocate a NULL buffer for padding. */ sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO); cpsw_init_slots(sc); /* Allocate slots to TX and RX queues. */ STAILQ_INIT(&sc->rx.avail); STAILQ_INIT(&sc->rx.active); STAILQ_INIT(&sc->tx.avail); STAILQ_INIT(&sc->tx.active); // For now: 128 slots to TX, rest to RX. // XXX TODO: start with 32/64 and grow dynamically based on demand. if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) { device_printf(dev, "failed to allocate dmamaps\n"); cpsw_detach(dev); return (ENOMEM); } device_printf(dev, "Initial queue size TX=%d RX=%d\n", sc->tx.queue_slots, sc->rx.queue_slots); sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); if (cpsw_intr_attach(sc) == -1) { device_printf(dev, "failed to setup interrupts\n"); cpsw_detach(dev); return (ENXIO); } #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) cpsw_vgroups[i].vid = -1; #endif /* Reset the controller. */ cpsw_reset(sc); cpsw_init(sc); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; sc->port[i].dev = device_add_child(dev, "cpsw", i); if (sc->port[i].dev == NULL) { cpsw_detach(dev); return (ENXIO); } } bus_generic_probe(dev); bus_generic_attach(dev); return (0); } static int cpsw_detach(device_t dev) { struct cpsw_softc *sc; int error, i; bus_generic_detach(dev); sc = device_get_softc(dev); for (i = 0; i < CPSW_PORTS; i++) { if (sc->port[i].dev) device_delete_child(dev, sc->port[i].dev); } if (device_is_attached(dev)) { callout_stop(&sc->watchdog.callout); callout_drain(&sc->watchdog.callout); } /* Stop and release all interrupts */ cpsw_intr_detach(sc); /* Free dmamaps and mbufs */ for (i = 0; i < nitems(sc->_slots); ++i) cpsw_free_slot(sc, &sc->_slots[i]); /* Free null padding buffer. */ if (sc->nullpad) free(sc->nullpad, M_DEVBUF); /* Free DMA tag */ if (sc->mbuf_dtag) { error = bus_dma_tag_destroy(sc->mbuf_dtag); KASSERT(error == 0, ("Unable to destroy DMA tag")); } /* Free IO memory handler */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); bus_release_resources(dev, irq_res_spec, sc->irq_res); /* Destroy mutexes */ mtx_destroy(&sc->rx.lock); mtx_destroy(&sc->tx.lock); /* Detach the switch device, if present. */ error = bus_generic_detach(dev); if (error != 0) return (error); return (device_delete_children(dev)); } static phandle_t cpsw_get_node(device_t bus, device_t dev) { /* Share controller node with port device. 
*/ return (ofw_bus_get_node(bus)); } static int cpswp_probe(device_t dev) { if (device_get_unit(dev) > 1) { device_printf(dev, "Only two ports are supported.\n"); return (ENXIO); } device_set_desc(dev, "Ethernet Switch Port"); return (BUS_PROBE_DEFAULT); } static int cpswp_attach(device_t dev) { int error; struct ifnet *ifp; struct cpswp_softc *sc; uint32_t reg; uint8_t mac_addr[ETHER_ADDR_LEN]; sc = device_get_softc(dev); sc->dev = dev; sc->pdev = device_get_parent(dev); sc->swsc = device_get_softc(sc->pdev); sc->unit = device_get_unit(dev); sc->phy = sc->swsc->port[sc->unit].phy; sc->vlan = sc->swsc->port[sc->unit].vlan; if (sc->swsc->dualemac && sc->vlan == -1) sc->vlan = sc->unit + 1; if (sc->unit == 0) { sc->physel = MDIOUSERPHYSEL0; sc->phyaccess = MDIOUSERACCESS0; } else { sc->physel = MDIOUSERPHYSEL1; sc->phyaccess = MDIOUSERACCESS1; } mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", MTX_DEF); /* Allocate network interface */ ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { cpswp_detach(dev); return (ENXIO); } if_initname(ifp, device_get_name(sc->dev), sc->unit); ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN? ifp->if_capenable = ifp->if_capabilities; ifp->if_init = cpswp_init; ifp->if_start = cpswp_start; ifp->if_ioctl = cpswp_ioctl; ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, &reg); mac_addr[0] = reg & 0xFF; mac_addr[1] = (reg >> 8) & 0xFF; mac_addr[2] = (reg >> 16) & 0xFF; mac_addr[3] = (reg >> 24) & 0xFF; /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, &reg); mac_addr[4] = reg & 0xFF; mac_addr[5] = (reg >> 8) & 0xFF; error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); if (error) { device_printf(dev, "attaching PHYs failed\n"); cpswp_detach(dev); return (error); } sc->mii = device_get_softc(sc->miibus); /* Select PHY and enable interrupts */ cpsw_write_4(sc->swsc, sc->physel, MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); ether_ifattach(sc->ifp, mac_addr); callout_init(&sc->mii_callout, 0); return (0); } static int cpswp_detach(device_t dev) { struct cpswp_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc->swsc, ("")); if (device_is_attached(dev)) { ether_ifdetach(sc->ifp); CPSW_PORT_LOCK(sc); cpswp_stop_locked(sc); CPSW_PORT_UNLOCK(sc); callout_drain(&sc->mii_callout); } bus_generic_detach(dev); if_free(sc->ifp); mtx_destroy(&sc->lock); return (0); } /* * * Init/Shutdown. 
* */ static int cpsw_ports_down(struct cpsw_softc *sc) { struct cpswp_softc *psc; struct ifnet *ifp1, *ifp2; if (!sc->dualemac) return (1); psc = device_get_softc(sc->port[0].dev); ifp1 = psc->ifp; psc = device_get_softc(sc->port[1].dev); ifp2 = psc->ifp; if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0) return (1); return (0); } static void cpswp_init(void *arg) { struct cpswp_softc *sc = arg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); cpswp_init_locked(arg); CPSW_PORT_UNLOCK(sc); } static void cpswp_init_locked(void *arg) { #ifdef CPSW_ETHERSWITCH int i; #endif struct cpswp_softc *sc = arg; struct ifnet *ifp; uint32_t reg; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); ifp = sc->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; getbinuptime(&sc->init_uptime); if (!sc->swsc->rx.running && !sc->swsc->tx.running) { /* Reset the controller. */ cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } /* Set Slave Mapping. */ cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 0x33221100); cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); /* Enable MAC RX/TX modules. */ /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg |= CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); /* Initialize ALE: set port to forwarding, initialize addrs */ cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); cpswp_ale_update_addresses(sc, 1); if (sc->swsc->dualemac) { /* Set Port VID. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), sc->vlan & 0xfff); cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ #ifdef CPSW_ETHERSWITCH for (i = 0; i < CPSW_VLANS; i++) { if (cpsw_vgroups[i].vid != -1) continue; cpsw_vgroups[i].vid = sc->vlan; break; } #endif } mii_mediachg(sc->mii); callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static int cpsw_shutdown(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static void cpsw_rx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_RX_LOCK(sc); CPSW_DEBUGF(sc, ("starting RX teardown")); sc->rx.teardown = 1; cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); CPSW_RX_UNLOCK(sc); while (sc->rx.running) { if (++i > 10) { device_printf(sc->dev, "Unable to cleanly shutdown receiver\n"); return; } DELAY(200); } if (!sc->rx.running) CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); } static void cpsw_tx_teardown(struct cpsw_softc *sc) { int i = 0; CPSW_TX_LOCK(sc); CPSW_DEBUGF(sc, ("starting TX teardown")); /* Start the TX queue teardown if queue is not empty. 
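* The hardware acknowledges a teardown by setting CPDMA_BD_TDOWNCMPLT in * a descriptor; cpsw_tx_dequeue() watches for that flag and marks the * transmitter stopped once the active queue drains.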
*/ if (STAILQ_FIRST(&sc->tx.active) != NULL) cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); else sc->tx.teardown = 1; cpsw_tx_dequeue(sc); while (sc->tx.running && ++i < 10) { DELAY(200); cpsw_tx_dequeue(sc); } if (sc->tx.running) { device_printf(sc->dev, "Unable to cleanly shutdown transmitter\n"); } CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)", i, sc->tx.active_queue_len)); CPSW_TX_UNLOCK(sc); } static void cpswp_stop_locked(struct cpswp_softc *sc) { struct ifnet *ifp; uint32_t reg; ifp = sc->ifp; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; /* Disable interface */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* Stop ticker */ callout_stop(&sc->mii_callout); /* Tear down the RX/TX queues. */ if (cpsw_ports_down(sc->swsc)) { cpsw_rx_teardown(sc->swsc); cpsw_tx_teardown(sc->swsc); } /* Stop MAC RX/TX modules. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg &= ~CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); if (cpsw_ports_down(sc->swsc)) { /* Capture stats before we reset controller. */ cpsw_stats_collect(sc->swsc); cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } } /* * Suspend/Resume. */ static int cpsw_suspend(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static int cpsw_resume(device_t dev) { struct cpsw_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); return (0); } /* * * IOCTL * */ static void cpsw_set_promisc(struct cpswp_softc *sc, int set) { uint32_t reg; /* * Enabling promiscuous mode requires ALE_BYPASS to be enabled. * That disables the ALE forwarding logic and causes every * packet to be sent only to the host port. In bypass mode, * the ALE processes host port transmit packets the same as in * normal mode. 
*/ reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); reg &= ~CPSW_ALE_CTL_BYPASS; if (set) reg |= CPSW_ALE_CTL_BYPASS; cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); } static void cpsw_set_allmulti(struct cpswp_softc *sc, int set) { if (set) { printf("All-multicast mode unimplemented\n"); } } static int cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct cpswp_softc *sc; struct ifreq *ifr; int error; uint32_t changed; error = 0; sc = ifp->if_softc; ifr = (struct ifreq *)data; switch (command) { case SIOCSIFCAP: changed = ifp->if_capenable ^ ifr->ifr_reqcap; if (changed & IFCAP_HWCSUM) { if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) ifp->if_capenable |= IFCAP_HWCSUM; else ifp->if_capenable &= ~IFCAP_HWCSUM; } error = 0; break; case SIOCSIFFLAGS: CPSW_PORT_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { changed = ifp->if_flags ^ sc->if_flags; CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed)); if (changed & IFF_PROMISC) cpsw_set_promisc(sc, ifp->if_flags & IFF_PROMISC); if (changed & IFF_ALLMULTI) cpsw_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI); } else { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: starting up")); cpswp_init_locked(sc); } } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); cpswp_stop_locked(sc); } sc->if_flags = ifp->if_flags; CPSW_PORT_UNLOCK(sc); break; case SIOCADDMULTI: cpswp_ale_update_addresses(sc, 0); break; case SIOCDELMULTI: /* Ugh. DELMULTI doesn't provide the specific address being removed, so the best we can do is remove everything and rebuild it all. */ cpswp_ale_update_addresses(sc, 1); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } /* * * MIIBUS * */ static int cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) { uint32_t r, retries = CPSW_MIIBUS_RETRIES; while (--retries) { r = cpsw_read_4(sc, reg); if ((r & MDIO_PHYACCESS_GO) == 0) return (1); DELAY(CPSW_MIIBUS_DELAY); } return (0); } static int cpswp_miibus_readreg(device_t dev, int phy, int reg) { struct cpswp_softc *sc; uint32_t cmd, r; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to read\n"); return (0); } /* Set GO, reg, phy */ cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during read\n"); return (0); } r = cpsw_read_4(sc->swsc, sc->phyaccess); if ((r & MDIO_PHYACCESS_ACK) == 0) { device_printf(dev, "Failed to read from PHY.\n"); r = 0; } return (r & 0xFFFF); } static int cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct cpswp_softc *sc; uint32_t cmd; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to write\n"); return (0); } /* Set GO, WRITE, reg, phy, and value */ cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during write\n"); return (0); } return (0); } static void cpswp_miibus_statchg(device_t dev) { struct cpswp_softc *sc; uint32_t mac_control, reg; sc = device_get_softc(dev); CPSW_DEBUGF(sc->swsc, ("")); reg = 
CPSW_SL_MACCONTROL(sc->unit); mac_control = cpsw_read_4(sc->swsc, reg); mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); switch (IFM_SUBTYPE(sc->mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: mac_control |= CPSW_SL_MACTL_GIG; break; case IFM_100_TX: mac_control |= CPSW_SL_MACTL_IFCTL_A; break; } if (sc->mii->mii_media_active & IFM_FDX) mac_control |= CPSW_SL_MACTL_FULLDUPLEX; cpsw_write_4(sc->swsc, reg, mac_control); } /* * * Transmit/Receive Packets. * */ static void cpsw_intr_rx(void *arg) { struct cpsw_softc *sc; struct ifnet *ifp; struct mbuf *received, *next; sc = (struct cpsw_softc *)arg; CPSW_RX_LOCK(sc); if (sc->rx.teardown) { sc->rx.running = 0; sc->rx.teardown = 0; cpsw_write_cp(sc, &sc->rx, 0xfffffffc); } received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; (*ifp->if_input)(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } } static struct mbuf * cpsw_rx_dequeue(struct cpsw_softc *sc) { + int nsegs, port, removed; struct cpsw_cpdma_bd bd; struct cpsw_slot *last, *slot; struct cpswp_softc *psc; - struct mbuf *mb_head, *mb_tail; - int port, removed = 0; + struct mbuf *m, *m0, *mb_head, *mb_tail; + uint16_t m0_flags; + nsegs = 0; + m0 = NULL; last = NULL; - mb_head = mb_tail = NULL; + mb_head = NULL; + mb_tail = NULL; + removed = 0; /* Pull completed packets off hardware RX queue. */ while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { cpsw_cpdma_read_bd(sc, slot, &bd); /* * Stop on packets still in use by hardware, but do not stop * on packets with the teardown complete flag, they will be * discarded later. */ if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) == CPDMA_BD_OWNER) break; last = slot; ++removed; STAILQ_REMOVE_HEAD(&sc->rx.active, next); STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); + m = slot->mbuf; + slot->mbuf = NULL; + if (bd.flags & CPDMA_BD_TDOWNCMPLT) { CPSW_DEBUGF(sc, ("RX teardown is complete")); - m_freem(slot->mbuf); - slot->mbuf = NULL; + m_freem(m); sc->rx.running = 0; sc->rx.teardown = 0; break; } port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; KASSERT(port >= 0 && port <= 1, ("packet received with invalid port: %d", port)); psc = device_get_softc(sc->port[port].dev); /* Set up mbuf */ - /* TODO: track SOP/EOP bits to assemble a full mbuf - out of received fragments. 
*/ - slot->mbuf->m_data += bd.bufoff; - slot->mbuf->m_len = bd.buflen; + m->m_data += bd.bufoff; + m->m_len = bd.buflen; if (bd.flags & CPDMA_BD_SOP) { - slot->mbuf->m_pkthdr.len = bd.pktlen; - slot->mbuf->m_pkthdr.rcvif = psc->ifp; - slot->mbuf->m_flags |= M_PKTHDR; + m->m_pkthdr.len = bd.pktlen; + m->m_pkthdr.rcvif = psc->ifp; + m->m_flags |= M_PKTHDR; + m0_flags = bd.flags; + m0 = m; } - slot->mbuf->m_next = NULL; - slot->mbuf->m_nextpkt = NULL; - if (bd.flags & CPDMA_BD_PASS_CRC) - m_adj(slot->mbuf, -ETHER_CRC_LEN); + nsegs++; + m->m_next = NULL; + m->m_nextpkt = NULL; + if (bd.flags & CPDMA_BD_EOP && m0 != NULL) { + if (m0_flags & CPDMA_BD_PASS_CRC) + m_adj(m0, -ETHER_CRC_LEN); + m0_flags = 0; + m0 = NULL; + if (nsegs > sc->rx.longest_chain) + sc->rx.longest_chain = nsegs; + nsegs = 0; + } if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) { /* check for valid CRC by looking into pkt_err[5:4] */ if ((bd.flags & (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) == CPDMA_BD_SOP) { - slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; - slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; - slot->mbuf->m_pkthdr.csum_data = 0xffff; + m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; + m->m_pkthdr.csum_flags |= CSUM_IP_VALID; + m->m_pkthdr.csum_data = 0xffff; } } if (STAILQ_FIRST(&sc->rx.active) != NULL && (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_write_hdp_slot(sc, &sc->rx, STAILQ_FIRST(&sc->rx.active)); sc->rx.queue_restart++; } /* Add mbuf to packet list to be returned. */ - if (mb_tail) { - mb_tail->m_nextpkt = slot->mbuf; + if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) { + mb_tail->m_nextpkt = m; + } else if (mb_tail != NULL) { + mb_tail->m_next = m; + } else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) { + if (bootverbose) + printf( + "%s: %s: discarding fragment packet w/o header\n", + __func__, psc->ifp->if_xname); + m_freem(m); + continue; } else { - mb_head = slot->mbuf; + mb_head = m; } - mb_tail = slot->mbuf; - slot->mbuf = NULL; - if (sc->rx_batch > 0 && sc->rx_batch == removed) - break; + mb_tail = m; } if (removed != 0) { cpsw_write_cp_slot(sc, &sc->rx, last); sc->rx.queue_removes += removed; sc->rx.avail_queue_len += removed; sc->rx.active_queue_len -= removed; if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed)); } return (mb_head); } static void cpsw_rx_enqueue(struct cpsw_softc *sc) { bus_dma_segment_t seg[1]; struct cpsw_cpdma_bd bd; struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot; int error, nsegs, added = 0; /* Register new mbufs with hardware. 
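* Each available slot gets a fresh mbuf cluster and a DMA mapping, and * its buffer descriptor is marked CPDMA_BD_OWNER so the hardware may fill * it with a received frame.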
*/ first_new_slot = NULL; last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { if (first_new_slot == NULL) first_new_slot = slot; if (slot->mbuf == NULL) { slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (slot->mbuf == NULL) { device_printf(sc->dev, "Unable to fill RX queue\n"); break; } slot->mbuf->m_len = slot->mbuf->m_pkthdr.len = slot->mbuf->m_ext.ext_size; } error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); KASSERT(error == 0, ("DMA error (error=%d)", error)); if (error != 0 || nsegs != 1) { device_printf(sc->dev, "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", __func__, nsegs, error); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); /* Create and submit new rx descriptor. */ if ((next = STAILQ_NEXT(slot, next)) != NULL) bd.next = cpsw_cpdma_bd_paddr(sc, next); else bd.next = 0; bd.bufptr = seg->ds_addr; bd.bufoff = 0; bd.buflen = MCLBYTES - 1; bd.pktlen = bd.buflen; bd.flags = CPDMA_BD_OWNER; cpsw_cpdma_write_bd(sc, slot, &bd); ++added; STAILQ_REMOVE_HEAD(&sc->rx.avail, next); STAILQ_INSERT_TAIL(&sc->rx.active, slot, next); } if (added == 0 || first_new_slot == NULL) return; CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); /* Link new entries to hardware RX queue. */ if (last_old_slot == NULL) { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); } else { /* Add buffers to end of current queue. */ cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); } sc->rx.queue_adds += added; sc->rx.avail_queue_len -= added; sc->rx.active_queue_len += added; cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added); if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) sc->rx.max_active_queue_len = sc->rx.active_queue_len; } static void cpswp_start(struct ifnet *ifp) { struct cpswp_softc *sc; sc = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->swsc->tx.running == 0) { return; } CPSW_TX_LOCK(sc->swsc); cpswp_tx_enqueue(sc); cpsw_tx_dequeue(sc->swsc); CPSW_TX_UNLOCK(sc->swsc); } static void cpsw_intr_tx(void *arg) { struct cpsw_softc *sc; sc = (struct cpsw_softc *)arg; CPSW_TX_LOCK(sc); if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) cpsw_write_cp(sc, &sc->tx, 0xfffffffc); cpsw_tx_dequeue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); CPSW_TX_UNLOCK(sc); } static void cpswp_tx_enqueue(struct cpswp_softc *sc) { bus_dma_segment_t segs[CPSW_TXFRAGS]; struct cpsw_cpdma_bd bd; struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; struct mbuf *m0; int error, nsegs, seg, added = 0, padlen; /* Pull pending packets from IF queue and prep them for DMA. */ last = NULL; first_new_slot = NULL; last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { IF_DEQUEUE(&sc->ifp->if_snd, m0); if (m0 == NULL) break; slot->mbuf = m0; padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len; if (padlen < 0) padlen = 0; else if (padlen > 0) m_append(slot->mbuf, padlen, sc->swsc->nullpad); /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); /* If the packet is too fragmented, try to simplify. 
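* m_defrag() copies the chain into as few clusters as possible; if even * that fails the packet is dropped rather than being left to stall the * transmit queue.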
*/ if (error == EFBIG || (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) { bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m0 = m_defrag(slot->mbuf, M_NOWAIT); if (m0 == NULL) { device_printf(sc->dev, "Can't defragment packet; dropping\n"); m_freem(slot->mbuf); } else { CPSW_DEBUGF(sc->swsc, ("Requeueing defragmented packet")); IF_PREPEND(&sc->ifp->if_snd, m0); } slot->mbuf = NULL; continue; } if (error != 0) { device_printf(sc->dev, "%s: Can't setup DMA (error=%d), dropping packet\n", __func__, error); bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREWRITE); CPSW_DEBUGF(sc->swsc, ("Queueing TX packet: %d segments + %d pad bytes", nsegs, padlen)); if (first_new_slot == NULL) first_new_slot = slot; /* Link from the previous descriptor. */ if (last != NULL) cpsw_cpdma_write_bd_next(sc->swsc, last, slot); slot->ifp = sc->ifp; /* If there is only one segment, the for() loop * gets skipped and the single buffer gets set up * as both SOP and EOP. */ if (nsegs > 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; /* Start by setting up the first buffer. */ bd.bufptr = segs[0].ds_addr; bd.bufoff = 0; bd.buflen = segs[0].ds_len; bd.pktlen = m_length(slot->mbuf, NULL); bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; if (sc->swsc->dualemac) { bd.flags |= CPDMA_BD_TO_PORT; bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK); } for (seg = 1; seg < nsegs; ++seg) { /* Save the previous buffer (which isn't EOP) */ cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); slot = STAILQ_FIRST(&sc->swsc->tx.avail); /* Setup next buffer (which isn't SOP) */ if (nsegs > seg + 1) { next = STAILQ_NEXT(slot, next); bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); } else bd.next = 0; bd.bufptr = segs[seg].ds_addr; bd.bufoff = 0; bd.buflen = segs[seg].ds_len; bd.pktlen = 0; bd.flags = CPDMA_BD_OWNER; } /* Save the final buffer. */ bd.flags |= CPDMA_BD_EOP; cpsw_cpdma_write_bd(sc->swsc, slot, &bd); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); last = slot; added += nsegs; if (nsegs > sc->swsc->tx.longest_chain) sc->swsc->tx.longest_chain = nsegs; BPF_MTAP(sc->ifp, m0); } if (first_new_slot == NULL) return; /* Attach the list of new buffers to the hardware TX queue. */ if (last_old_slot != NULL && (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & CPDMA_BD_EOQ) == 0) { /* Add buffers to end of current queue. */ cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, first_new_slot); } else { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); } sc->swsc->tx.queue_adds += added; sc->swsc->tx.avail_queue_len -= added; sc->swsc->tx.active_queue_len += added; if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; } CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); } static int cpsw_tx_dequeue(struct cpsw_softc *sc) { struct cpsw_slot *slot, *last_removed_slot = NULL; struct cpsw_cpdma_bd bd; uint32_t flags, removed = 0; /* Pull completed buffers off the hardware TX queue. */ slot = STAILQ_FIRST(&sc->tx.active); while (slot != NULL) { flags = cpsw_cpdma_read_bd_flags(sc, slot); /* TearDown complete is only marked on the SOP for the packet. 
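 * The ownership test below is likewise limited to SOP descriptors: the
 * loop stops only at a start-of-packet descriptor the hardware still
 * owns, and during a teardown completed slots are reclaimed regardless
 * of the owner bit.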
*/ if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { sc->tx.teardown = 1; } - if ((flags & CPDMA_BD_OWNER) != 0 && sc->tx.teardown == 0) + if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) == + (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0) break; /* Hardware is still using this packet. */ bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; if (slot->ifp) { if (sc->tx.teardown == 0) if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); else if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); } /* Dequeue any additional buffers used by this packet. */ while (slot != NULL && slot->mbuf == NULL) { STAILQ_REMOVE_HEAD(&sc->tx.active, next); STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); ++removed; last_removed_slot = slot; slot = STAILQ_FIRST(&sc->tx.active); } cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); /* Restart the TX queue if necessary. */ cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); if (slot != NULL && bd.next != 0 && (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_write_hdp_slot(sc, &sc->tx, slot); sc->tx.queue_restart++; break; } } if (removed != 0) { sc->tx.queue_removes += removed; sc->tx.active_queue_len -= removed; sc->tx.avail_queue_len += removed; if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); } if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { CPSW_DEBUGF(sc, ("TX teardown is complete")); sc->tx.teardown = 0; sc->tx.running = 0; } return (removed); } /* * * Miscellaneous interrupts. * */ static void cpsw_intr_rx_thresh(void *arg) { struct cpsw_softc *sc; struct ifnet *ifp; struct mbuf *received, *next; sc = (struct cpsw_softc *)arg; CPSW_RX_LOCK(sc); received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; (*ifp->if_input)(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } } static void cpsw_intr_misc_host_error(struct cpsw_softc *sc) { uint32_t intstat; uint32_t dmastat; int txerr, rxerr, txchan, rxchan; printf("\n\n"); device_printf(sc->dev, "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); printf("\n\n"); intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); txerr = (dmastat >> 20) & 15; txchan = (dmastat >> 16) & 7; rxerr = (dmastat >> 12) & 15; rxchan = (dmastat >> 8) & 7; switch (txerr) { case 0: break; case 1: printf("SOP error on TX channel %d\n", txchan); break; case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); break; case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); break; case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); break; case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); break; case 6: printf("Packet length error on TX channel %d\n", txchan); break; default: printf("Unknown error on TX channel %d\n", txchan); break; } if (txerr != 0) { printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 
printf("CPSW_CPDMA_TX%d_CP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); cpsw_dump_queue(sc, &sc->tx.active); } switch (rxerr) { case 0: break; case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); break; case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); break; case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); break; case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); break; default: printf("Unknown RX error on RX channel %d\n", rxchan); break; } if (rxerr != 0) { printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); printf("CPSW_CPDMA_RX%d_CP=0x%x\n", rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); cpsw_dump_queue(sc, &sc->rx.active); } printf("\nALE Table\n"); cpsw_ale_dump_table(sc); // XXX do something useful here?? panic("CPSW HOST ERROR INTERRUPT"); // Suppress this interrupt in the future. cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); // The watchdog will probably reset the controller // in a little while. It will probably fail again. } static void cpsw_intr_misc(void *arg) { struct cpsw_softc *sc = arg; uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); if (stat & CPSW_WR_C_MISC_EVNT_PEND) CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); if (stat & CPSW_WR_C_MISC_STAT_PEND) cpsw_stats_collect(sc); if (stat & CPSW_WR_C_MISC_HOST_PEND) cpsw_intr_misc_host_error(sc); if (stat & CPSW_WR_C_MISC_MDIOLINK) { cpsw_write_4(sc, MDIOLINKINTMASKED, cpsw_read_4(sc, MDIOLINKINTMASKED)); } if (stat & CPSW_WR_C_MISC_MDIOUSER) { CPSW_DEBUGF(sc, ("MDIO operation completed interrupt unimplemented")); } cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); } /* * * Periodic Checks and Watchdog. 
* */ static void cpswp_tick(void *msc) { struct cpswp_softc *sc = msc; /* Check for media type change */ mii_tick(sc->mii); if (sc->media_status != sc->mii->mii_media.ifm_media) { printf("%s: media type changed (ifm_media=%x)\n", __func__, sc->mii->mii_media.ifm_media); cpswp_ifmedia_upd(sc->ifp); } /* Schedule another timeout one second from now */ callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); } static void cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct cpswp_softc *sc; struct mii_data *mii; sc = ifp->if_softc; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; CPSW_PORT_UNLOCK(sc); } static int cpswp_ifmedia_upd(struct ifnet *ifp) { struct cpswp_softc *sc; sc = ifp->if_softc; CPSW_DEBUGF(sc->swsc, ("")); CPSW_PORT_LOCK(sc); mii_mediachg(sc->mii); sc->media_status = sc->mii->mii_media.ifm_media; CPSW_PORT_UNLOCK(sc); return (0); } static void cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) { struct cpswp_softc *psc; int i; cpsw_debugf_head("CPSW watchdog"); device_printf(sc->dev, "watchdog timeout\n"); printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); cpsw_dump_queue(sc, &sc->tx.active); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } } static void cpsw_tx_watchdog(void *msc) { struct cpsw_softc *sc; sc = msc; CPSW_TX_LOCK(sc); if (sc->tx.active_queue_len == 0 || !sc->tx.running) { sc->watchdog.timer = 0; /* Nothing to do. */ } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ } else if (cpsw_tx_dequeue(sc) > 0) { sc->watchdog.timer = 0; /* We just did something. */ } else { /* There was something to do but it didn't get done. */ ++sc->watchdog.timer; if (sc->watchdog.timer > 5) { sc->watchdog.timer = 0; ++sc->watchdog.resets; cpsw_tx_watchdog_full_reset(sc); } } sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; CPSW_TX_UNLOCK(sc); /* Schedule another timeout one second from now */ callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * ALE support routines. * */ static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); } static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); } static void cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; /* First four entries are link address and broadcast. 
*/ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); } } } static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, uint8_t *mac) { int free_index = -1, matching_index = -1, i; uint32_t ale_entry[3], ale_type; /* Find a matching entry or a free entry. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && (((ale_entry[0] >>24) & 0xFF) == mac[2]) && (((ale_entry[0] >>16) & 0xFF) == mac[3]) && (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (ENOMEM); i = free_index; } if (vlan != -1) ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; else ale_type = ALE_TYPE_ADDR << 28; /* Set MAC address */ ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = mac[0] << 8 | mac[1]; /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ ale_entry[1] |= ALE_MCAST_FWD | ale_type; /* Set portmask [68:66] */ ale_entry[2] = (portmap & 7) << 2; cpsw_ale_write_entry(sc, i, ale_entry); return 0; } static void cpsw_ale_dump_table(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); switch (ALE_TYPE(ale_entry)) { case ALE_TYPE_VLAN: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); printf("\n"); break; case ALE_TYPE_ADDR: case ALE_TYPE_VLAN_ADDR: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", (ale_entry[1] >> 8) & 0xFF, (ale_entry[1] >> 0) & 0xFF, (ale_entry[0] >>24) & 0xFF, (ale_entry[0] >>16) & 0xFF, (ale_entry[0] >> 8) & 0xFF, (ale_entry[0] >> 0) & 0xFF); printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast "); if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("port: %u ", ALE_PORTS(ale_entry)); printf("\n"); break; } } printf("\n"); } static int cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) { uint8_t *mac; uint32_t ale_entry[3], ale_type, portmask; struct ifmultiaddr *ifma; if (sc->swsc->dualemac) { ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; portmask = 1 << (sc->unit + 1) | 1 << 0; } else { ale_type = ALE_TYPE_ADDR << 28; portmask = 7; } /* * Route incoming packets for our MAC address to Port 0 (host). * For simplicity, keep this entry at table index 0 for port 1 and * at index 2 for port 2 in the ALE. 
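 * An ALE entry is written as three 32-bit words: word 0 carries the
 * four low MAC octets, word 1 the entry type plus the two high octets,
 * and word 2 the port bits.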
*/ if_addr_rlock(sc->ifp); mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr); ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ ale_entry[2] = 0; /* port = 0 */ cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); /* Set outgoing MAC Address for slave port. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), mac[5] << 8 | mac[4]); if_addr_runlock(sc->ifp); /* Keep the broadcast address at table entry 1 (or 3). */ ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; ale_entry[2] = portmask << 2; cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); /* SIOCDELMULTI doesn't specify the particular address being removed, so we have to remove all and rebuild. */ if (purge) cpsw_ale_remove_all_mc_entries(sc->swsc); /* Set other multicast addrs desired. */ if_maddr_rlock(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); } if_maddr_runlock(sc->ifp); return (0); } static int cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, int untag, int mcregflood, int mcunregflood) { int free_index, i, matching_index; uint32_t ale_entry[3]; free_index = matching_index = -1; /* Find a matching entry or a free entry. */ for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if (ALE_VLAN(ale_entry) == vlan) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (-1); i = free_index; } ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | (mcunregflood & 7) << 8 | (ports & 7); ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); return (0); } /* * * Statistics and Sysctls. 
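 * The hardware statistics counters are 32-bit and clear on write;
 * cpsw_stats_collect() folds them into the 64-bit shadow_stats copies
 * so the exported sysctl values do not wrap at 32 bits.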
* */ #if 0 static void cpsw_stats_dump(struct cpsw_softc *sc) { int i; uint32_t r; for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, (intmax_t)sc->shadow_stats[i], r, (intmax_t)sc->shadow_stats[i] + r)); } } #endif static void cpsw_stats_collect(struct cpsw_softc *sc) { int i; uint32_t r; CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); sc->shadow_stats[i] += r; cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r); } } static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct cpsw_stat *stat; uint64_t result; sc = (struct cpsw_softc *)arg1; stat = &cpsw_stat_sysctls[oidp->oid_number]; result = sc->shadow_stats[oidp->oid_number]; result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); return (sysctl_handle_64(oidp, &result, 0, req)); } static int cpsw_stat_attached(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct bintime t; unsigned result; sc = (struct cpsw_softc *)arg1; getbinuptime(&t); bintime_sub(&t, &sc->attach_uptime); result = t.sec; return (sysctl_handle_int(oidp, &result, 0, req)); } static int cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) { int error; struct cpsw_softc *sc; uint32_t ctrl, intr_per_ms; sc = (struct cpsw_softc *)arg1; error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); if (error != 0 || req->newptr == NULL) return (error); ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); if (sc->coal_us == 0) { /* Disable the interrupt pace hardware. */ cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); return (0); } if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) sc->coal_us = CPSW_WR_C_IMAX_US_MAX; if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) sc->coal_us = CPSW_WR_C_IMAX_US_MIN; intr_per_ms = 1000 / sc->coal_us; /* Just to make sure... */ if (intr_per_ms > CPSW_WR_C_IMAX_MAX) intr_per_ms = CPSW_WR_C_IMAX_MAX; if (intr_per_ms < CPSW_WR_C_IMAX_MIN) intr_per_ms = CPSW_WR_C_IMAX_MIN; /* Set the prescale to produce 4us pulses from the 125 MHz clock. */ ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; /* Enable the interrupt pace hardware.
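 * For example, coal_us = 200 yields intr_per_ms = 5, limiting each of
 * the RX and TX channels to at most five interrupts per millisecond.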
*/ cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); return (0); } static int cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *swsc; struct cpswp_softc *sc; struct bintime t; unsigned result; swsc = arg1; sc = device_get_softc(swsc->port[arg2].dev); if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { getbinuptime(&t); bintime_sub(&t, &sc->init_uptime); result = t.sec; } else result = 0; return (sysctl_handle_int(oidp, &result, 0, req)); } static void cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", CTLFLAG_RD, &queue->queue_slots, 0, "Total buffers currently assigned to this queue"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", CTLFLAG_RD, &queue->active_queue_len, 0, "Buffers currently registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", CTLFLAG_RD, &queue->max_active_queue_len, 0, "Max value of activeBuffers since last driver reset"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", CTLFLAG_RD, &queue->avail_queue_len, 0, "Buffers allocated to this queue but not currently " "registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", CTLFLAG_RD, &queue->max_avail_queue_len, 0, "Max value of availBuffers since last driver reset"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", CTLFLAG_RD, &queue->queue_adds, 0, "Total buffers added to queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", CTLFLAG_RD, &queue->queue_removes, 0, "Total buffers removed from queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", CTLFLAG_RD, &queue->queue_restart, 0, "Total times the queue has been restarted"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", CTLFLAG_RD, &queue->longest_chain, 0, "Max buffers used for a single packet"); } static void cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", CTLFLAG_RD, &sc->watchdog.resets, 0, "Total number of watchdog resets"); } static void cpsw_add_sysctls(struct cpsw_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *stats_node, *queue_node, *node; struct sysctl_oid_list *parent, *stats_parent, *queue_parent; struct sysctl_oid_list *ports_parent, *port_parent; char port[16]; int i; ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); - - SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "rx_batch", - CTLFLAG_RW, &sc->rx_batch, 0, "Set the rx batch size"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", "Time since driver attach"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU", "minimum time between interrupts"); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", CTLFLAG_RD, NULL, "CPSW Ports Statistics"); ports_parent = SYSCTL_CHILDREN(node); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; port[0] = '0' + i; port[1] 
= '\0'; node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO, port, CTLFLAG_RD, NULL, "CPSW Port Statistics"); port_parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", CTLTYPE_UINT | CTLFLAG_RD, sc, i, cpsw_stat_uptime, "IU", "Seconds since driver init"); } stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "CPSW Statistics"); stats_parent = SYSCTL_CHILDREN(stats_node); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { SYSCTL_ADD_PROC(ctx, stats_parent, i, cpsw_stat_sysctls[i].oid, CTLTYPE_U64 | CTLFLAG_RD, sc, 0, cpsw_stats_sysctl, "IU", cpsw_stat_sysctls[i].oid); } queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", CTLFLAG_RD, NULL, "CPSW Queue Statistics"); queue_parent = SYSCTL_CHILDREN(queue_node); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "TX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->tx); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "RX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->rx); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", CTLFLAG_RD, NULL, "Watchdog Statistics"); cpsw_add_watchdog_sysctls(ctx, node, sc); } #ifdef CPSW_ETHERSWITCH static etherswitch_info_t etherswitch_info = { .es_nports = CPSW_PORTS + 1, .es_nvlangroups = CPSW_VLANS, .es_name = "TI Common Platform Ethernet Switch (CPSW)", .es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q, }; static etherswitch_info_t * cpsw_getinfo(device_t dev) { return (&etherswitch_info); } static int cpsw_getport(device_t dev, etherswitch_port_t *p) { int err; struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmediareq *ifmr; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); err = 0; sc = device_get_softc(dev); if (p->es_port == CPSW_CPU_PORT) { p->es_flags |= ETHERSWITCH_PORT_CPU; ifmr = &p->es_ifmr; ifmr->ifm_current = ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; ifmr->ifm_count = 0; } else { psc = device_get_softc(sc->port[p->es_port - 1].dev); err = ifmedia_ioctl(psc->ifp, &p->es_ifr, &psc->mii->mii_media, SIOCGIFMEDIA); } reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port)); p->es_pvid = reg & ETHERSWITCH_VID_MASK; reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (reg & ALE_PORTCTL_DROP_UNTAGGED) p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; if (reg & ALE_PORTCTL_INGRESS) p->es_flags |= ETHERSWITCH_PORT_INGRESS; return (err); } static int cpsw_setport(device_t dev, etherswitch_port_t *p) { struct cpsw_softc *sc; struct cpswp_softc *psc; struct ifmedia *ifm; uint32_t reg; if (p->es_port < 0 || p->es_port > CPSW_PORTS) return (ENXIO); sc = device_get_softc(dev); if (p->es_pvid != 0) { cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port), p->es_pvid & ETHERSWITCH_VID_MASK); } reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) reg |= ALE_PORTCTL_DROP_UNTAGGED; else reg &= ~ALE_PORTCTL_DROP_UNTAGGED; if (p->es_flags & ETHERSWITCH_PORT_INGRESS) reg |= ALE_PORTCTL_INGRESS; else reg &= ~ALE_PORTCTL_INGRESS; cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg); /* CPU port does not allow media settings. */ if (p->es_port == CPSW_CPU_PORT) return (0); psc = device_get_softc(sc->port[p->es_port - 1].dev); ifm = &psc->mii->mii_media; return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); } static int cpsw_getconf(device_t dev, etherswitch_conf_t *conf) { /* Return the VLAN mode.
*/ conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; return (0); } static int cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i, vid; uint32_t ale_entry[3]; struct cpsw_softc *sc; sc = device_get_softc(dev); if (vg->es_vlangroup >= CPSW_VLANS) return (EINVAL); vg->es_vid = 0; vid = cpsw_vgroups[vg->es_vlangroup].vid; if (vid == -1) return (0); for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vid != ALE_VLAN(ale_entry)) continue; vg->es_fid = 0; vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID; vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry); vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry); } return (0); } static void cpsw_remove_vlan(struct cpsw_softc *sc, int vlan) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) continue; if (vlan != ALE_VLAN(ale_entry)) continue; ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); break; } } static int cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) { int i; struct cpsw_softc *sc; sc = device_get_softc(dev); for (i = 0; i < CPSW_VLANS; i++) { /* Is this Vlan ID in use by another vlangroup ? */ if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid) return (EINVAL); } if (vg->es_vid == 0) { if (cpsw_vgroups[vg->es_vlangroup].vid == -1) return (0); cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid); cpsw_vgroups[vg->es_vlangroup].vid = -1; vg->es_untagged_ports = 0; vg->es_member_ports = 0; vg->es_vid = 0; return (0); } vg->es_vid &= ETHERSWITCH_VID_MASK; vg->es_member_ports &= CPSW_PORTS_MASK; vg->es_untagged_ports &= CPSW_PORTS_MASK; if (cpsw_vgroups[vg->es_vlangroup].vid != -1 && cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid) return (EINVAL); cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid; cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports, vg->es_untagged_ports, vg->es_member_ports, 0); return (0); } static int cpsw_readreg(device_t dev, int addr) { /* Not supported. */ return (0); } static int cpsw_writereg(device_t dev, int addr, int value) { /* Not supported. */ return (0); } static int cpsw_readphy(device_t dev, int phy, int reg) { /* Not supported. */ return (0); } static int cpsw_writephy(device_t dev, int phy, int reg, int data) { /* Not supported. */ return (0); } #endif Index: projects/clang400-import/sys/arm/ti/cpsw/if_cpswvar.h =================================================================== --- projects/clang400-import/sys/arm/ti/cpsw/if_cpswvar.h (revision 312719) +++ projects/clang400-import/sys/arm/ti/cpsw/if_cpswvar.h (revision 312720) @@ -1,150 +1,149 @@ /*- * Copyright (c) 2012 Damjan Marion * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _IF_CPSWVAR_H #define _IF_CPSWVAR_H #define CPSW_PORTS 2 #define CPSW_INTR_COUNT 4 /* MII BUS */ #define CPSW_MIIBUS_RETRIES 20 #define CPSW_MIIBUS_DELAY 100 #define CPSW_MAX_ALE_ENTRIES 1024 #define CPSW_SYSCTL_COUNT 34 #ifdef CPSW_ETHERSWITCH #define CPSW_CPU_PORT 0 #define CPSW_PORTS_MASK 0x7 #define CPSW_VLANS 128 /* Arbitrary number. */ struct cpsw_vlangroups { int vid; }; #endif struct cpsw_slot { uint32_t bd_offset; /* Offset of corresponding BD within CPPI RAM. */ bus_dmamap_t dmamap; struct ifnet *ifp; struct mbuf *mbuf; STAILQ_ENTRY(cpsw_slot) next; }; STAILQ_HEAD(cpsw_slots, cpsw_slot); struct cpsw_queue { struct mtx lock; int running; int teardown; struct cpsw_slots active; struct cpsw_slots avail; uint32_t queue_adds; /* total bufs added */ uint32_t queue_removes; /* total bufs removed */ uint32_t queue_removes_at_last_tick; /* Used by watchdog */ uint32_t queue_restart; int queue_slots; int active_queue_len; int max_active_queue_len; int avail_queue_len; int max_avail_queue_len; int longest_chain; /* Largest # segments in a single packet. */ int hdp_offset; }; struct cpsw_port { device_t dev; int phy; int vlan; }; struct cpsw_softc { device_t dev; int active_slave; int debug; int dualemac; - int rx_batch; phandle_t node; struct bintime attach_uptime; /* system uptime when attach happened. */ struct cpsw_port port[2]; unsigned coal_us; /* RX and TX buffer tracking */ struct cpsw_queue rx, tx; /* We expect 1 memory resource and 4 interrupts from the device tree. */ int mem_rid; struct resource *mem_res; struct resource *irq_res[CPSW_INTR_COUNT]; void *ih_cookie[CPSW_INTR_COUNT]; /* A buffer full of nulls for TX padding. */ void *nullpad; bus_dma_tag_t mbuf_dtag; struct { int resets; int timer; struct callout callout; } watchdog; /* 64-bit versions of 32-bit hardware statistics counters */ uint64_t shadow_stats[CPSW_SYSCTL_COUNT]; /* CPPI STATERAM has 512 slots for building TX/RX queues. */ /* TODO: Size here supposedly varies with different versions of the controller. Check DaVinci specs and find a good way to adjust this. One option is to have a separate Device Tree parameter for number of slots; another option is to calculate it from the memory size in the device tree. */ struct cpsw_slot _slots[CPSW_CPPI_RAM_SIZE / sizeof(struct cpsw_cpdma_bd)]; struct cpsw_slots avail; }; struct cpswp_softc { device_t dev; device_t miibus; device_t pdev; int media_status; int unit; int vlan; struct bintime init_uptime; /* system uptime when init happened.
*/ struct callout mii_callout; struct cpsw_softc *swsc; struct ifnet *ifp; struct mii_data *mii; struct mtx lock; uint32_t if_flags; uint32_t phy; uint32_t phyaccess; uint32_t physel; }; #endif /*_IF_CPSWVAR_H */ Index: projects/clang400-import/sys/arm64/arm64/mp_machdep.c =================================================================== --- projects/clang400-import/sys/arm64/arm64/mp_machdep.c (revision 312719) +++ projects/clang400-import/sys/arm64/arm64/mp_machdep.c (revision 312720) @@ -1,839 +1,837 @@ /*- * Copyright (c) 2015-2016 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "opt_acpi.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #endif #include #include "pic_if.h" typedef void intr_ipi_send_t(void *, cpuset_t, u_int); typedef void intr_ipi_handler_t(void *); #define INTR_IPI_NAMELEN (MAXCOMLEN + 1) struct intr_ipi { intr_ipi_handler_t * ii_handler; void * ii_handler_arg; intr_ipi_send_t * ii_send; void * ii_send_arg; char ii_name[INTR_IPI_NAMELEN]; u_long * ii_count; }; static struct intr_ipi ipi_sources[INTR_IPI_COUNT]; static struct intr_ipi *intr_ipi_lookup(u_int); static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *, void *); extern struct pcpu __pcpu[]; static device_identify_t arm64_cpu_identify; static device_probe_t arm64_cpu_probe; static device_attach_t arm64_cpu_attach; static void ipi_ast(void *); static void ipi_hardclock(void *); static void ipi_preempt(void *); static void ipi_rendezvous(void *); static void ipi_stop(void *); -static int ipi_handler(void *arg); - struct mtx ap_boot_mtx; struct pcb stoppcbs[MAXCPU]; static device_t cpu_list[MAXCPU]; /* * Not all systems boot from the first CPU in the device tree. To work around * this we need to find which CPU we have booted from so when we later * enable the secondary CPUs we skip this one. 
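 * The boot CPU is identified by matching mpidr_el1 against the MADT or
 * FDT CPU entries below.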
*/ static int cpu0 = -1; void mpentry(unsigned long cpuid); void init_secondary(uint64_t); uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16); /* Set to 1 once we're ready to let the APs out of the pen. */ volatile int aps_ready = 0; /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; static device_method_t arm64_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_identify, arm64_cpu_identify), DEVMETHOD(device_probe, arm64_cpu_probe), DEVMETHOD(device_attach, arm64_cpu_attach), DEVMETHOD_END }; static devclass_t arm64_cpu_devclass; static driver_t arm64_cpu_driver = { "arm64_cpu", arm64_cpu_methods, 0 }; DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0); static void arm64_cpu_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "arm64_cpu", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL) device_printf(parent, "add child failed\n"); } static int arm64_cpu_probe(device_t dev) { u_int cpuid; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); device_quiet(dev); return (0); } static int arm64_cpu_attach(device_t dev) { const uint32_t *reg; size_t reg_size; u_int cpuid; int i; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid)); reg = cpu_get_cpuid(dev, ®_size); if (reg == NULL) return (EINVAL); if (bootverbose) { device_printf(dev, "register <"); for (i = 0; i < reg_size; i++) printf("%s%x", (i == 0) ? "" : " ", reg[i]); printf(">\n"); } /* Set the device to start it later */ cpu_list[cpuid] = dev; return (0); } static void release_aps(void *dummy __unused) { int i; /* Only release CPUs if they exist */ if (mp_ncpus == 1) return; intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL); intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL); intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL); intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL); intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL); intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL); atomic_store_rel_int(&aps_ready, 1); /* Wake up the other CPUs */ __asm __volatile("sev"); printf("Release APs\n"); for (i = 0; i < 2000; i++) { if (smp_started) return; DELAY(1000); } printf("APs not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); void init_secondary(uint64_t cpu) { struct pcpu *pcpup; pcpup = &__pcpu[cpu]; /* * Set the pcpu pointer with a backup in tpidr_el1 to be * loaded when entering the kernel from userland. */ __asm __volatile( "mov x18, %0 \n" "msr tpidr_el1, %0" :: "r"(pcpup)); /* Spin until the BSP releases the APs */ while (!aps_ready) __asm __volatile("wfe"); /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pcpup->pc_curthread = pcpup->pc_idlethread; pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb; /* * Identify current CPU. This is necessary to setup * affinity registers and to provide support for * runtime chip identification. */ identify_cpu(); intr_pic_init_secondary(); /* Start per-CPU event timers. 
*/ cpu_initclocks_ap(); #ifdef VFP vfp_init(); #endif dbg_monitor_init(); /* Enable interrupts */ intr_enable(); mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPIs, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to init_secondary"); /* NOTREACHED */ } /* * Send IPI thru interrupt controller. */ static void pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi) { KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi); } /* * Setup IPI handler on interrupt controller. * * Not SMP coherent. */ static void intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, void *arg) { struct intr_irqsrc *isrc; struct intr_ipi *ii; int error; KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc); if (error != 0) return; isrc->isrc_handlers++; ii = intr_ipi_lookup(ipi); KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); ii->ii_handler = hand; ii->ii_handler_arg = arg; ii->ii_send = pic_ipi_send; ii->ii_send_arg = isrc; strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); ii->ii_count = intr_ipi_setup_counters(name); } static void intr_ipi_send(cpuset_t cpus, u_int ipi) { struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); ii->ii_send(ii->ii_send_arg, cpus, ipi); } static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_hardclock(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); } static void ipi_preempt(void *dummy __unused) { CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); } static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_stop(void *dummy __unused) { u_int cpu; CTR0(KTR_SMP, "IPI_STOP"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); } struct cpu_group * cpu_topo(void) { return (smp_topo_none()); } /* Determine if we are running an MP machine */ int cpu_mp_probe(void) { /* ARM64TODO: Read the u bit of mpidr_el1 to determine this */ return (1); } static bool start_cpu(u_int id, uint64_t target_cpu) { struct pcpu *pcpup; vm_paddr_t pa; u_int cpuid; int err; /* Check we are able to start this cpu */ if (id > mp_maxid) return (false); KASSERT(id < MAXCPU, ("Too many CPUs")); /* We are already running on cpu 0 */ if (id == cpu0) return (true); /* * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other * CPUs ordered as they are likely grouped into clusters so it can be * useful to keep that property, e.g. for the GICv3 driver to send * an IPI to all CPUs in the cluster.
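 * For example, with mp_maxid = 3 and cpu0 = 2 the rotation below maps
 * hardware ids 2, 3, 0, 1 to logical cpuids 0, 1, 2, 3.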
*/ cpuid = id; if (cpuid < cpu0) cpuid += mp_maxid + 1; cpuid -= cpu0; pcpup = &__pcpu[cpuid]; pcpu_init(pcpup, cpuid, sizeof(struct pcpu)); dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); dpcpu_init(dpcpu[cpuid - 1], cpuid); printf("Starting CPU %u (%lx)\n", cpuid, target_cpu); pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry); err = psci_cpu_on(target_cpu, pa, cpuid); if (err != PSCI_RETVAL_SUCCESS) { /* * Panic here if INVARIANTS are enabled and PSCI failed to * start the requested CPU. psci_cpu_on returns PSCI_MISSING * to indicate we are unable to use it to start the given CPU. */ KASSERT(err == PSCI_MISSING, ("Failed to start CPU %u (%lx)\n", id, target_cpu)); pcpu_destroy(pcpup); kmem_free(kernel_arena, (vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE); dpcpu[cpuid - 1] = NULL; /* Notify the user that the CPU failed to start */ printf("Failed to start CPU %u (%lx)\n", id, target_cpu); } else CPU_SET(cpuid, &all_cpus); return (true); } #ifdef DEV_ACPI static void madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg) { ACPI_MADT_GENERIC_INTERRUPT *intr; u_int *cpuid; switch(entry->Type) { case ACPI_MADT_TYPE_GENERIC_INTERRUPT: intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry; cpuid = arg; start_cpu((*cpuid), intr->ArmMpidr); (*cpuid)++; break; default: break; } } static void cpu_init_acpi(void) { ACPI_TABLE_MADT *madt; vm_paddr_t physaddr; u_int cpuid; physaddr = acpi_find_table(ACPI_SIG_MADT); if (physaddr == 0) return; madt = acpi_map_table(physaddr, ACPI_SIG_MADT); if (madt == NULL) { printf("Unable to map the MADT, not starting APs\n"); return; } cpuid = 0; acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length, madt_handler, &cpuid); acpi_unmap_table(madt); } #endif #ifdef FDT static boolean_t cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { uint64_t target_cpu; target_cpu = reg[0]; if (addr_size == 2) { target_cpu <<= 32; target_cpu |= reg[1]; } return (start_cpu(id, target_cpu) ?
TRUE : FALSE); } #endif /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); CPU_SET(0, &all_cpus); switch(arm64_bus_method) { #ifdef DEV_ACPI case ARM64_BUS_ACPI: KASSERT(cpu0 >= 0, ("Current CPU was not found")); cpu_init_acpi(); break; #endif #ifdef FDT case ARM64_BUS_FDT: KASSERT(cpu0 >= 0, ("Current CPU was not found")); ofw_cpu_early_foreach(cpu_init_fdt, true); break; #endif default: break; } } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } #ifdef DEV_ACPI static void cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg) { ACPI_MADT_GENERIC_INTERRUPT *intr; u_int *cores = arg; uint64_t mpidr_reg; switch(entry->Type) { case ACPI_MADT_TYPE_GENERIC_INTERRUPT: intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry; if (cpu0 < 0) { mpidr_reg = READ_SPECIALREG(mpidr_el1); if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr) cpu0 = *cores; } (*cores)++; break; default: break; } } static u_int cpu_count_acpi(void) { ACPI_TABLE_MADT *madt; vm_paddr_t physaddr; u_int cores; physaddr = acpi_find_table(ACPI_SIG_MADT); if (physaddr == 0) return (0); madt = acpi_map_table(physaddr, ACPI_SIG_MADT); if (madt == NULL) { printf("Unable to map the MADT, not starting APs\n"); return (0); } cores = 0; acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length, cpu_count_acpi_handler, &cores); acpi_unmap_table(madt); return (cores); } #endif #ifdef FDT static boolean_t cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { uint64_t mpidr_fdt, mpidr_reg; if (cpu0 < 0) { mpidr_fdt = reg[0]; if (addr_size == 2) { mpidr_fdt <<= 32; mpidr_fdt |= reg[1]; } mpidr_reg = READ_SPECIALREG(mpidr_el1); if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt) cpu0 = id; } return (TRUE); } #endif void cpu_mp_setmaxid(void) { #if defined(DEV_ACPI) || defined(FDT) int cores; #endif switch(arm64_bus_method) { #ifdef DEV_ACPI case ARM64_BUS_ACPI: cores = cpu_count_acpi(); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the ACPI tables\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; return; } break; #endif #ifdef FDT case ARM64_BUS_FDT: cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false); if (cores > 0) { cores = MIN(cores, MAXCPU); if (bootverbose) printf("Found %d CPUs in the device tree\n", cores); mp_ncpus = cores; mp_maxid = cores - 1; return; } break; #endif default: break; } if (bootverbose) printf("No CPU data, limiting to 1 core\n"); mp_ncpus = 1; mp_maxid = 0; } /* * Lookup IPI source. */ static struct intr_ipi * intr_ipi_lookup(u_int ipi) { if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); return (&ipi_sources[ipi]); } /* * interrupt controller dispatch function for IPIs. It should * be called straight from the interrupt controller, when associated * interrupt source is learned. Or from anybody who has an interrupt * source mapped. */ void intr_ipi_dispatch(u_int ipi, struct trapframe *tf) { void *arg; struct intr_ipi *ii; ii = intr_ipi_lookup(ipi); if (ii->ii_count == NULL) panic("%s: not setup IPI %u", __func__, ipi); intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); /* * Supply ipi filter with trapframe argument * if none is registered. */ arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf; ii->ii_handler(arg); } #ifdef notyet /* * Map IPI into interrupt controller. * * Not SMP coherent. 
*/ static int ipi_map(struct intr_irqsrc *isrc, u_int ipi) { boolean_t is_percpu; int error; if (ipi >= INTR_IPI_COUNT) panic("%s: no such IPI %u", __func__, ipi); KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); isrc->isrc_type = INTR_ISRCT_NAMESPACE; isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI; isrc->isrc_nspc_num = ipi_next_num; error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu); if (error == 0) { isrc->isrc_dev = intr_irq_root_dev; ipi_next_num++; } return (error); } /* * Setup IPI handler to interrupt source. * * Note that there could be more ways how to send and receive IPIs * on a platform like fast interrupts for example. In that case, * one can call this function with ASIF_NOALLOC flag set and then * call intr_ipi_dispatch() when appropriate. * * Not SMP coherent. */ int intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter, void *arg, u_int flags) { struct intr_irqsrc *isrc; int error; if (filter == NULL) return(EINVAL); isrc = intr_ipi_lookup(ipi); if (isrc->isrc_ipifilter != NULL) return (EEXIST); if ((flags & AISHF_NOALLOC) == 0) { error = ipi_map(isrc, ipi); if (error != 0) return (error); } isrc->isrc_ipifilter = filter; isrc->isrc_arg = arg; isrc->isrc_handlers = 1; isrc->isrc_count = intr_ipi_setup_counters(name); isrc->isrc_index = 0; /* it should not be used in IPI case */ if (isrc->isrc_dev != NULL) { PIC_ENABLE_INTR(isrc->isrc_dev, isrc); PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc); } return (0); } #endif /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t cpus; cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); intr_ipi_send(cpus, ipi); } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); intr_ipi_send(cpus, ipi); } Index: projects/clang400-import/sys/boot/fdt/dts/arm/beaglebone-black.dts =================================================================== --- projects/clang400-import/sys/boot/fdt/dts/arm/beaglebone-black.dts (revision 312719) +++ projects/clang400-import/sys/boot/fdt/dts/arm/beaglebone-black.dts (revision 312720) @@ -1,93 +1,87 @@ /*- * Copyright (c) 2015 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "am335x-boneblack.dts" #include "beaglebone-common.dtsi" &am33xx_pinmux { i2c1_pins: pinmux_i2c1_pins { pinctrl-single,pins = < AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_d1.i2c1_sda */ AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_cs0.i2c1_scl */ >; }; spi1_pins: pinmux_spi1_pins { pinctrl-single,pins = < AM33XX_IOPAD(0x964, PIN_INPUT_PULLUP | MUX_MODE2) /* eCAP0_in_PWM0_out.spi1_cs1 */ AM33XX_IOPAD(0x990, PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcasp0_aclkx.spi1_sclk */ AM33XX_IOPAD(0x994, PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcasp0_fsx.spi1_d0 - miso */ AM33XX_IOPAD(0x998, PIN_INPUT_PULLUP | MUX_MODE3) /* mcasp0_axr0.spi1_d1 - mosi */ AM33XX_IOPAD(0x99c, PIN_INPUT_PULLUP | MUX_MODE3) /* mcasp0_ahclkr.spi1_cs0 */ >; }; }; &i2c0 { - tda998x: hdmi-encoder { - compatible = "nxp,tda998x"; - reg = <0x70>; - - pinctrl-names = "default", "off"; - pinctrl-0 = <&nxp_hdmi_bonelt_pins>; - pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>; + tda998x: tda19988 { status = "okay"; }; }; &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&i2c1_pins>; status = "okay"; }; &i2c2 { pinctrl-names = "default"; pinctrl-0 = <&i2c2_pins>; status = "okay"; }; &spi1 { pinctrl-names = "default"; pinctrl-0 = <&spi1_pins>; status = "okay"; }; &lcdc { hdmi = <&tda998x>; }; / { hdmi { status = "disabled"; }; }; Index: projects/clang400-import/sys/cam/ctl/ctl_backend_ramdisk.c =================================================================== --- projects/clang400-import/sys/cam/ctl/ctl_backend_ramdisk.c (revision 312719) +++ projects/clang400-import/sys/cam/ctl/ctl_backend_ramdisk.c (revision 312720) @@ -1,929 +1,1361 @@ /*- * Copyright (c) 2003, 2008 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation - * Copyright (c) 2014-2015 Alexander Motin + * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $ */ /* - * CAM Target Layer backend for a "fake" ramdisk. + * CAM Target Layer black hole and RAM disk backend. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#define PRIV(io) \ + ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) +#define ARGS(io) \ + ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) + +#define PPP (PAGE_SIZE / sizeof(uint8_t **)) +#ifdef __LP64__ +#define PPPS (PAGE_SHIFT - 3) +#else +#define PPPS (PAGE_SHIFT - 2) +#endif +#define SGPP (PAGE_SIZE / sizeof(struct ctl_sg_entry)) + +#define P_UNMAPPED NULL /* Page is unmapped. */ +#define P_ANCHORED ((void *)(uintptr_t)1) /* Page is anchored. */ + typedef enum { + GP_READ, /* Return data page or zero page. */ + GP_WRITE, /* Return data page, try allocate if none. */ + GP_ANCHOR, /* Return data page, try anchor if none. */ + GP_OTHER, /* Return what present, do not allocate/anchor. */ +} getpage_op_t; + +typedef enum { CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01, CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02, CTL_BE_RAMDISK_LUN_WAITING = 0x04 } ctl_be_ramdisk_lun_flags; struct ctl_be_ramdisk_lun { struct ctl_lun_create_params params; - char lunname[32]; - uint64_t size_bytes; - uint64_t size_blocks; + char lunname[32]; + int indir; + uint8_t **pages; + uint8_t *zero_page; + struct sx page_lock; + u_int pblocksize; + u_int pblockmul; + uint64_t size_bytes; + uint64_t size_blocks; + uint64_t cap_bytes; + uint64_t cap_used; struct ctl_be_ramdisk_softc *softc; ctl_be_ramdisk_lun_flags flags; STAILQ_ENTRY(ctl_be_ramdisk_lun) links; - struct ctl_be_lun cbe_lun; - struct taskqueue *io_taskqueue; - struct task io_task; + struct ctl_be_lun cbe_lun; + struct taskqueue *io_taskqueue; + struct task io_task; STAILQ_HEAD(, ctl_io_hdr) cont_queue; - struct mtx_padalign queue_lock; + struct mtx_padalign queue_lock; }; struct ctl_be_ramdisk_softc { struct mtx lock; - int rd_size; -#ifdef CTL_RAMDISK_PAGES - uint8_t **ramdisk_pages; - int num_pages; -#else - uint8_t *ramdisk_buffer; -#endif int num_luns; STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list; }; static struct ctl_be_ramdisk_softc rd_softc; extern struct ctl_softc *control_softc; static int ctl_backend_ramdisk_init(void); static int ctl_backend_ramdisk_shutdown(void); static int ctl_backend_ramdisk_move_done(union ctl_io *io); +static void ctl_backend_ramdisk_compare(union ctl_io *io); +static void ctl_backend_ramdisk_rw(union ctl_io *io); static int ctl_backend_ramdisk_submit(union ctl_io *io); -static void ctl_backend_ramdisk_continue(union ctl_io *io); +static void ctl_backend_ramdisk_worker(void *context, int pending); +static int ctl_backend_ramdisk_config_read(union ctl_io *io); +static int 
ctl_backend_ramdisk_config_write(union ctl_io *io); +static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname); static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); -static void ctl_backend_ramdisk_worker(void *context, int pending); static void ctl_backend_ramdisk_lun_shutdown(void *be_lun); static void ctl_backend_ramdisk_lun_config_status(void *be_lun, ctl_lun_config_status status); -static int ctl_backend_ramdisk_config_write(union ctl_io *io); -static int ctl_backend_ramdisk_config_read(union ctl_io *io); static struct ctl_backend_driver ctl_be_ramdisk_driver = { .name = "ramdisk", .flags = CTL_BE_FLAG_HAS_CONFIG, .init = ctl_backend_ramdisk_init, .shutdown = ctl_backend_ramdisk_shutdown, .data_submit = ctl_backend_ramdisk_submit, .data_move_done = ctl_backend_ramdisk_move_done, .config_read = ctl_backend_ramdisk_config_read, .config_write = ctl_backend_ramdisk_config_write, - .ioctl = ctl_backend_ramdisk_ioctl + .ioctl = ctl_backend_ramdisk_ioctl, + .lun_attr = ctl_backend_ramdisk_lun_attr, }; MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk"); CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver); -int +static int ctl_backend_ramdisk_init(void) { struct ctl_be_ramdisk_softc *softc = &rd_softc; -#ifdef CTL_RAMDISK_PAGES - int i; -#endif memset(softc, 0, sizeof(*softc)); mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF); STAILQ_INIT(&softc->lun_list); - softc->rd_size = 1024 * 1024; -#ifdef CTL_RAMDISK_PAGES - softc->num_pages = softc->rd_size / PAGE_SIZE; - softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) * - softc->num_pages, M_RAMDISK, - M_WAITOK); - for (i = 0; i < softc->num_pages; i++) - softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK); -#else - softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK, - M_WAITOK); -#endif - return (0); } static int ctl_backend_ramdisk_shutdown(void) { struct ctl_be_ramdisk_softc *softc = &rd_softc; struct ctl_be_ramdisk_lun *lun, *next_lun; -#ifdef CTL_RAMDISK_PAGES - int i; -#endif mtx_lock(&softc->lock); STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) { /* * Drop our lock here. Since ctl_invalidate_lun() can call * back into us, this could potentially lead to a recursive * lock of the same mutex, which would cause a hang. 
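(Editorial note: the hang is real because this lock was created with plain MTX_DEF, which as far as we know is non-recursive, so re-locking it from the same thread trips the "recursed on non-recursive mutex" assertion. A minimal sketch of the hazard the unlock avoids, with hypothetical names:

	struct mtx m;

	mtx_init(&m, "demo", NULL, MTX_DEF);   // no MTX_RECURSE flag
	mtx_lock(&m);
	callback();    // if this calls mtx_lock(&m) again: panic/deadlock
	mtx_unlock(&m);

Dropping the lock around ctl_invalidate_lun() and re-taking it afterwards is the standard way out; the _SAFE list iterator keeps a saved next pointer so the current element may go away in the window.)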
*/ mtx_unlock(&softc->lock); ctl_disable_lun(&lun->cbe_lun); ctl_invalidate_lun(&lun->cbe_lun); mtx_lock(&softc->lock); } mtx_unlock(&softc->lock); - -#ifdef CTL_RAMDISK_PAGES - for (i = 0; i < softc->num_pages; i++) - free(softc->ramdisk_pages[i], M_RAMDISK); - free(softc->ramdisk_pages, M_RAMDISK); -#else - free(softc->ramdisk_buffer, M_RAMDISK); -#endif mtx_destroy(&softc->lock); return (0); } +static uint8_t * +ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn, + getpage_op_t op) +{ + uint8_t **p, ***pp; + off_t i; + int s; + + if (be_lun->cap_bytes == 0) { + switch (op) { + case GP_READ: + return (be_lun->zero_page); + case GP_WRITE: + return ((uint8_t *)be_lun->pages); + case GP_ANCHOR: + return (P_ANCHORED); + default: + return (P_UNMAPPED); + } + } + if (op == GP_WRITE || op == GP_ANCHOR) { + sx_xlock(&be_lun->page_lock); + pp = &be_lun->pages; + for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { + if (*pp == NULL) { + *pp = malloc(PAGE_SIZE, M_RAMDISK, + M_WAITOK|M_ZERO); + } + i = pn >> s; + pp = (uint8_t ***)&(*pp)[i]; + pn -= i << s; + } + if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) { + if (op == GP_WRITE) { + *pp = malloc(be_lun->pblocksize, M_RAMDISK, + M_WAITOK|M_ZERO); + } else + *pp = P_ANCHORED; + be_lun->cap_used += be_lun->pblocksize; + } else if (*pp == P_ANCHORED && op == GP_WRITE) { + *pp = malloc(be_lun->pblocksize, M_RAMDISK, + M_WAITOK|M_ZERO); + } + sx_xunlock(&be_lun->page_lock); + return ((uint8_t *)*pp); + } else { + sx_slock(&be_lun->page_lock); + p = be_lun->pages; + for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { + if (p == NULL) + break; + i = pn >> s; + p = (uint8_t **)p[i]; + pn -= i << s; + } + sx_sunlock(&be_lun->page_lock); + if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ) + return (be_lun->zero_page); + return ((uint8_t *)p); + } +}; + +static void +ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn) +{ + uint8_t ***pp; + off_t i; + int s; + + if (be_lun->cap_bytes == 0) + return; + sx_xlock(&be_lun->page_lock); + pp = &be_lun->pages; + for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { + if (*pp == NULL) + goto noindir; + i = pn >> s; + pp = (uint8_t ***)&(*pp)[i]; + pn -= i << s; + } + if (*pp == P_ANCHORED) { + be_lun->cap_used -= be_lun->pblocksize; + *pp = P_UNMAPPED; + } else if (*pp != P_UNMAPPED) { + free(*pp, M_RAMDISK); + be_lun->cap_used -= be_lun->pblocksize; + *pp = P_UNMAPPED; + } +noindir: + sx_xunlock(&be_lun->page_lock); +}; + +static void +ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn) +{ + uint8_t ***pp; + off_t i; + int s; + + if (be_lun->cap_bytes == 0) + return; + sx_xlock(&be_lun->page_lock); + pp = &be_lun->pages; + for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { + if (*pp == NULL) + goto noindir; + i = pn >> s; + pp = (uint8_t ***)&(*pp)[i]; + pn -= i << s; + } + if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) { + be_lun->cap_used += be_lun->pblocksize; + *pp = P_ANCHORED; + } else if (*pp != P_ANCHORED) { + free(*pp, M_RAMDISK); + *pp = P_ANCHORED; + } +noindir: + sx_xunlock(&be_lun->page_lock); +}; + +static void +ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir) +{ + int i; + + if (p == NULL) + return; + if (indir == 0) { + free(p, M_RAMDISK); + return; + } + for (i = 0; i < PPP; i++) { + if (p[i] == NULL) + continue; + ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1); + } + free(p, M_RAMDISK); +}; + +static size_t +cmp(uint8_t *a, uint8_t *b, size_t 
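/*
 * Editorial note on the lookup code above: be_lun->pages is the root of
 * a radix tree with be_lun->indir levels.  Each interior page holds PPP
 * pointers and PPPS is log2(PPP), so every level consumes PPPS bits of
 * the page number.  A stand-alone sketch of the same descent (userland
 * toy with assumed 4 KB pages, not the kernel code):
 *
 *	#define PAGE_SHIFT 12                // 4 KB pages assumed
 *	#define PPPS (PAGE_SHIFT - 3)        // LP64: 8-byte pointers
 *
 *	static void walk(uint64_t pn, int indir)
 *	{
 *		for (int s = (indir - 1) * PPPS; s >= 0; s -= PPPS) {
 *			uint64_t i = pn >> s;   // slot at this level
 *			pn -= i << s;           // remainder for next level
 *		}
 *	}
 *
 * With indir == 0 the root is the data page itself.  GP_WRITE allocates
 * missing interior pages on the way down; the read side stops at the
 * first NULL and hands back the shared zero page instead.
 */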
size) +{ + size_t i; + + for (i = 0; i < size; i++) { + if (a[i] != b[i]) + break; + } + return (i); +} + static int +ctl_backend_ramdisk_cmp(union ctl_io *io) +{ + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; + uint8_t *page; + uint8_t info[8]; + uint64_t lba; + u_int lbaoff, lbas, res, off; + + lbas = io->scsiio.kern_data_len / cbe_lun->blocksize; + lba = ARGS(io)->lba + PRIV(io)->len - lbas; + off = 0; + for (; lbas > 0; lbas--, lba++) { + page = ctl_backend_ramdisk_getpage(be_lun, + lba >> cbe_lun->pblockexp, GP_READ); + lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); + page += lbaoff * cbe_lun->blocksize; + res = cmp(io->scsiio.kern_data_ptr + off, page, + cbe_lun->blocksize); + off += res; + if (res < cbe_lun->blocksize) + break; + } + if (lbas > 0) { + off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len; + scsi_u64to8b(off, info); + ctl_set_sense(&io->scsiio, /*current_error*/ 1, + /*sense_key*/ SSD_KEY_MISCOMPARE, + /*asc*/ 0x1D, /*ascq*/ 0x00, + /*type*/ SSD_ELEM_INFO, + /*size*/ sizeof(info), /*data*/ &info, + /*type*/ SSD_ELEM_NONE); + return (1); + } + return (0); +} + +static int ctl_backend_ramdisk_move_done(union ctl_io *io) { - struct ctl_be_lun *cbe_lun; - struct ctl_be_ramdisk_lun *be_lun; + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n")); - cbe_lun = CTL_BACKEND_LUN(io); - be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun; #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; if (io->scsiio.kern_sg_entries > 0) free(io->scsiio.kern_data_ptr, M_RAMDISK); io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; if (io->io_hdr.flags & CTL_FLAG_ABORT) { ; } else if (io->io_hdr.port_status != 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ io->io_hdr.port_status); } else if (io->scsiio.kern_data_resid != 0 && (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_invalid_field_ciu(&io->scsiio); } else if ((io->io_hdr.port_status == 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { - if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) { + if (ARGS(io)->flags & CTL_LLF_COMPARE) { + /* We have data block ready for comparison. 
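(Editorial gloss on the comparison machinery above: cmp() returns the index of the first differing byte, equivalent to this illustrative helper:

	static size_t first_diff(const uint8_t *a, const uint8_t *b, size_t n)
	{
		size_t i;

		for (i = 0; i < n && a[i] == b[i]; i++)
			;
		return (i);   // equals n when the blocks match
	}

and on a mismatch the running offset, rebased by kern_rel_offset minus kern_data_len to be request-relative, is stored with scsi_u64to8b() in the INFORMATION field of the MISCOMPARE sense data (ASC/ASCQ 0x1D/0x00) so the initiator can locate the first non-matching byte.)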
*/ + if (ctl_backend_ramdisk_cmp(io)) + goto done; + } + if (ARGS(io)->len > PRIV(io)->len) { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->cont_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (0); } ctl_set_success(&io->scsiio); } +done: ctl_data_submit_done(io); return(0); } -static int -ctl_backend_ramdisk_submit(union ctl_io *io) +static void +ctl_backend_ramdisk_compare(union ctl_io *io) { - struct ctl_be_lun *cbe_lun; - struct ctl_lba_len_flags *lbalen; + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + u_int lbas, len; - cbe_lun = CTL_BACKEND_LUN(io); - lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; - if (lbalen->flags & CTL_LLF_VERIFY) { - ctl_set_success(&io->scsiio); - ctl_data_submit_done(io); - return (CTL_RETVAL_COMPLETE); - } - io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer = - lbalen->len * cbe_lun->blocksize; - ctl_backend_ramdisk_continue(io); - return (CTL_RETVAL_COMPLETE); + lbas = ARGS(io)->len - PRIV(io)->len; + lbas = MIN(lbas, 131072 / cbe_lun->blocksize); + len = lbas * cbe_lun->blocksize; + + io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; + io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK); + io->scsiio.kern_data_len = len; + io->scsiio.kern_sg_entries = 0; + io->io_hdr.flags |= CTL_FLAG_ALLOCATED; + PRIV(io)->len += lbas; +#ifdef CTL_TIME_IO + getbinuptime(&io->io_hdr.dma_start_bt); +#endif + ctl_datamove(io); } static void -ctl_backend_ramdisk_continue(union ctl_io *io) +ctl_backend_ramdisk_rw(union ctl_io *io) { - struct ctl_be_ramdisk_softc *softc; - int len, len_filled, sg_filled; -#ifdef CTL_RAMDISK_PAGES + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; struct ctl_sg_entry *sg_entries; - int i; -#endif + uint8_t *page; + uint64_t lba; + u_int i, len, lbaoff, lbas, sgs, off; + getpage_op_t op; - softc = &rd_softc; - len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer; -#ifdef CTL_RAMDISK_PAGES - sg_filled = min(btoc(len), softc->num_pages); - if (sg_filled > 1) { + lba = ARGS(io)->lba + PRIV(io)->len; + lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); + lbas = ARGS(io)->len - PRIV(io)->len; + lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff); + sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp; + off = lbaoff * cbe_lun->blocksize; + op = (ARGS(io)->flags & CTL_LLF_WRITE) ? 
GP_WRITE : GP_READ; + if (sgs > 1) { io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) * - sg_filled, M_RAMDISK, - M_WAITOK); + sgs, M_RAMDISK, M_WAITOK); sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; - for (i = 0, len_filled = 0; i < sg_filled; i++) { - sg_entries[i].addr = softc->ramdisk_pages[i]; - sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled); - len_filled += sg_entries[i].len; + len = lbas * cbe_lun->blocksize; + for (i = 0; i < sgs; i++) { + page = ctl_backend_ramdisk_getpage(be_lun, + (lba >> cbe_lun->pblockexp) + i, op); + if (page == P_UNMAPPED || page == P_ANCHORED) { + free(io->scsiio.kern_data_ptr, M_RAMDISK); +nospc: + ctl_set_space_alloc_fail(&io->scsiio); + ctl_data_submit_done(io); + return; + } + sg_entries[i].addr = page + off; + sg_entries[i].len = MIN(len, be_lun->pblocksize - off); + len -= sg_entries[i].len; + off = 0; } } else { - sg_filled = 0; - len_filled = len; - io->scsiio.kern_data_ptr = softc->ramdisk_pages[0]; + page = ctl_backend_ramdisk_getpage(be_lun, + lba >> cbe_lun->pblockexp, op); + if (page == P_UNMAPPED || page == P_ANCHORED) + goto nospc; + sgs = 0; + io->scsiio.kern_data_ptr = page + off; } -#else - sg_filled = 0; - len_filled = min(len, softc->rd_size); - io->scsiio.kern_data_ptr = softc->ramdisk_buffer; -#endif /* CTL_RAMDISK_PAGES */ io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; - io->scsiio.kern_data_len = len_filled; - io->scsiio.kern_sg_entries = sg_filled; + io->scsiio.kern_data_len = lbas * cbe_lun->blocksize; + io->scsiio.kern_sg_entries = sgs; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; - io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled; + PRIV(io)->len += lbas; #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif ctl_datamove(io); } +static int +ctl_backend_ramdisk_submit(union ctl_io *io) +{ + struct ctl_lba_len_flags *lbalen = ARGS(io); + + if (lbalen->flags & CTL_LLF_VERIFY) { + ctl_set_success(&io->scsiio); + ctl_data_submit_done(io); + return (CTL_RETVAL_COMPLETE); + } + PRIV(io)->len = 0; + if (lbalen->flags & CTL_LLF_COMPARE) + ctl_backend_ramdisk_compare(io); + else + ctl_backend_ramdisk_rw(io); + return (CTL_RETVAL_COMPLETE); +} + static void ctl_backend_ramdisk_worker(void *context, int pending) { struct ctl_be_ramdisk_lun *be_lun; union ctl_io *io; be_lun = (struct ctl_be_ramdisk_lun *)context; - mtx_lock(&be_lun->queue_lock); for (;;) { io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue); if (io != NULL) { STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr, ctl_io_hdr, links); mtx_unlock(&be_lun->queue_lock); - ctl_backend_ramdisk_continue(io); + if (ARGS(io)->flags & CTL_LLF_COMPARE) + ctl_backend_ramdisk_compare(io); + else + ctl_backend_ramdisk_rw(io); mtx_lock(&be_lun->queue_lock); continue; } /* * If we get here, there is no work left in the queues, so * just break out and let the task queue go to sleep. 
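(Editorial gloss: the worker exists because move_done runs from the data-move completion path, where presumably neither sleeping page allocations nor unbounded recursion are welcome, so move_done re-queues the I/O and this thread continues it. PRIV(io)->len is the progress cursor in LBAs; each pass of the rw or compare routine advances it until it reaches ARGS(io)->len. Schematically, with hypothetical helpers:

	unsigned done = 0;                 // PRIV(io)->len in the real code

	while (done < total_lbas) {
		unsigned chunk = MIN(total_lbas - done, max_chunk);
		start_dma(chunk);          // ctl_datamove(); its completion
		done += chunk;             //   re-queues the I/O back here
	}

where max_chunk is one scatter/gather table's worth for reads and writes, and the literal 131072 bytes for COMPARE above.)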
*/ break; } mtx_unlock(&be_lun->queue_lock); } static int +ctl_backend_ramdisk_gls(union ctl_io *io) +{ + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; + struct scsi_get_lba_status_data *data; + uint8_t *page; + u_int lbaoff; + + data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; + scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr); + lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp); + scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length); + page = ctl_backend_ramdisk_getpage(be_lun, + ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER); + if (page == P_UNMAPPED) + data->descr[0].status = 1; + else if (page == P_ANCHORED) + data->descr[0].status = 2; + else + data->descr[0].status = 0; + ctl_config_read_done(io); + return (CTL_RETVAL_COMPLETE); +} + +static int +ctl_backend_ramdisk_config_read(union ctl_io *io) +{ + int retval = 0; + + switch (io->scsiio.cdb[0]) { + case SERVICE_ACTION_IN: + if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { + retval = ctl_backend_ramdisk_gls(io); + break; + } + ctl_set_invalid_field(&io->scsiio, + /*sks_valid*/ 1, + /*command*/ 1, + /*field*/ 1, + /*bit_valid*/ 1, + /*bit*/ 4); + ctl_config_read_done(io); + retval = CTL_RETVAL_COMPLETE; + break; + default: + ctl_set_invalid_opcode(&io->scsiio); + ctl_config_read_done(io); + retval = CTL_RETVAL_COMPLETE; + break; + } + return (retval); +} + +static void +ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len, + int anchor) +{ + struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; + uint8_t *page; + uint64_t p, lp; + u_int lbaoff; + getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER; + + /* Partially zero first partial page. */ + p = lba >> cbe_lun->pblockexp; + lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); + if (lbaoff != 0) { + page = ctl_backend_ramdisk_getpage(be_lun, p, op); + if (page != P_UNMAPPED && page != P_ANCHORED) { + memset(page + lbaoff * cbe_lun->blocksize, 0, + min(len, be_lun->pblockmul - lbaoff) * + cbe_lun->blocksize); + } + p++; + } + + /* Partially zero last partial page. */ + lp = (lba + len) >> cbe_lun->pblockexp; + lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp); + if (p <= lp && lbaoff != 0) { + page = ctl_backend_ramdisk_getpage(be_lun, lp, op); + if (page != P_UNMAPPED && page != P_ANCHORED) + memset(page, 0, lbaoff * cbe_lun->blocksize); + } + + /* Delete remaining full pages. 
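(Editorial gloss: only whole provisioning pages can be unmapped or anchored, so the two fragments above are zeroed in place and this loop covers the full pages between them. Worked example of the boundary arithmetic with pblockexp = 3, i.e. 8 LBAs per page:

	uint64_t lba = 5, len = 30;          // sample UNMAP range, LBAs 5-34
	uint64_t p  = lba >> 3;              // first touched page: 0
	uint64_t lp = (lba + len) >> 3;      // limit page: 4
	// head: LBAs 5-7 of page 0 zeroed, then p becomes 1
	// tail: LBAs 32-34 of page 4 zeroed
	// pages 1, 2 and 3 are fully covered and are unmapped or anchored

so three pages are actually released and six stray blocks are zeroed.)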
*/ + if (anchor) { + for (; p < lp; p++) + ctl_backend_ramdisk_anchorpage(be_lun, p); + } else { + for (; p < lp; p++) + ctl_backend_ramdisk_unmappage(be_lun, p); + } +} + +static void +ctl_backend_ramdisk_ws(union ctl_io *io) +{ + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun; + struct ctl_lba_len_flags *lbalen = ARGS(io); + uint8_t *page; + uint64_t lba; + u_int lbaoff, lbas; + + if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) { + ctl_set_invalid_field(&io->scsiio, + /*sks_valid*/ 1, + /*command*/ 1, + /*field*/ 1, + /*bit_valid*/ 0, + /*bit*/ 0); + ctl_config_write_done(io); + return; + } + if (lbalen->flags & SWS_UNMAP) { + ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len, + (lbalen->flags & SWS_ANCHOR) != 0); + ctl_set_success(&io->scsiio); + ctl_config_write_done(io); + return; + } + + for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) { + page = ctl_backend_ramdisk_getpage(be_lun, + lba >> cbe_lun->pblockexp, GP_WRITE); + if (page == P_UNMAPPED || page == P_ANCHORED) { + ctl_set_space_alloc_fail(&io->scsiio); + ctl_data_submit_done(io); + return; + } + lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); + page += lbaoff * cbe_lun->blocksize; + if (lbalen->flags & SWS_NDOB) { + memset(page, 0, cbe_lun->blocksize); + } else { + memcpy(page, io->scsiio.kern_data_ptr, + cbe_lun->blocksize); + } + if (lbalen->flags & SWS_LBDATA) + scsi_ulto4b(lba, page); + } + ctl_set_success(&io->scsiio); + ctl_config_write_done(io); +} + +static void +ctl_backend_ramdisk_unmap(union ctl_io *io) +{ + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io); + struct scsi_unmap_desc *buf, *end; + + if ((ptrlen->flags & ~SU_ANCHOR) != 0) { + ctl_set_invalid_field(&io->scsiio, + /*sks_valid*/ 0, + /*command*/ 0, + /*field*/ 0, + /*bit_valid*/ 0, + /*bit*/ 0); + ctl_config_write_done(io); + return; + } + + buf = (struct scsi_unmap_desc *)ptrlen->ptr; + end = buf + ptrlen->len / sizeof(*buf); + for (; buf < end; buf++) { + ctl_backend_ramdisk_delete(cbe_lun, + scsi_8btou64(buf->lba), scsi_4btoul(buf->length), + (ptrlen->flags & SU_ANCHOR) != 0); + } + + ctl_set_success(&io->scsiio); + ctl_config_write_done(io); +} + +static int +ctl_backend_ramdisk_config_write(union ctl_io *io) +{ + struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); + int retval = 0; + + switch (io->scsiio.cdb[0]) { + case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: + /* We have no cache to flush. 
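(True enough for a RAM disk. Editorial gloss on the WRITE SAME handler above, which now honors the real flag semantics instead of succeeding blindly; condensed one-block model using the SWS_ names from the code:

	if (flags & SWS_UNMAP) {
		// deallocate the range; anchor it when SWS_ANCHOR is set too
	} else {
		if (flags & SWS_NDOB)
			memset(block, 0, blocksize);   // no data-out buffer
		else
			memcpy(block, pattern, blocksize);
		if (flags & SWS_LBDATA)
			scsi_ulto4b(lba, block);       // stamp each block's LBA
	}

Any flag outside that set draws ILLEGAL REQUEST sense, as does an UNMAP CDB carrying anything but SU_ANCHOR.)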
*/ + ctl_set_success(&io->scsiio); + ctl_config_write_done(io); + break; + case START_STOP_UNIT: { + struct scsi_start_stop_unit *cdb; + + cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; + if ((cdb->how & SSS_PC_MASK) != 0) { + ctl_set_success(&io->scsiio); + ctl_config_write_done(io); + break; + } + if (cdb->how & SSS_START) { + if (cdb->how & SSS_LOEJ) + ctl_lun_has_media(cbe_lun); + ctl_start_lun(cbe_lun); + } else { + ctl_stop_lun(cbe_lun); + if (cdb->how & SSS_LOEJ) + ctl_lun_ejected(cbe_lun); + } + ctl_set_success(&io->scsiio); + ctl_config_write_done(io); + break; + } + case PREVENT_ALLOW: + ctl_set_success(&io->scsiio); + ctl_config_write_done(io); + break; + case WRITE_SAME_10: + case WRITE_SAME_16: + ctl_backend_ramdisk_ws(io); + break; + case UNMAP: + ctl_backend_ramdisk_unmap(io); + break; + default: + ctl_set_invalid_opcode(&io->scsiio); + ctl_config_write_done(io); + retval = CTL_RETVAL_COMPLETE; + break; + } + + return (retval); +} + +static uint64_t +ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname) +{ + struct ctl_be_ramdisk_lun *be_lun = arg; + uint64_t val; + + val = UINT64_MAX; + if (be_lun->cap_bytes == 0) + return (val); + sx_slock(&be_lun->page_lock); + if (strcmp(attrname, "blocksused") == 0) { + val = be_lun->cap_used / be_lun->cbe_lun.blocksize; + } else if (strcmp(attrname, "blocksavail") == 0) { + val = (be_lun->cap_bytes - be_lun->cap_used) / + be_lun->cbe_lun.blocksize; + } + sx_sunlock(&be_lun->page_lock); + return (val); +} + +static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_be_ramdisk_softc *softc = &rd_softc; struct ctl_lun_req *lun_req; int retval; retval = 0; switch (cmd) { case CTL_LUN_REQ: lun_req = (struct ctl_lun_req *)addr; switch (lun_req->reqtype) { case CTL_LUNREQ_CREATE: retval = ctl_backend_ramdisk_create(softc, lun_req); break; case CTL_LUNREQ_RM: retval = ctl_backend_ramdisk_rm(softc, lun_req); break; case CTL_LUNREQ_MODIFY: retval = ctl_backend_ramdisk_modify(softc, lun_req); break; default: lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "%s: invalid LUN request type %d", __func__, lun_req->reqtype); break; } break; default: retval = ENOTTY; break; } return (retval); } static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_lun_rm_params *params; int retval; params = &req->reqdata.rm; mtx_lock(&softc->lock); STAILQ_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } retval = ctl_disable_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: error %d returned from ctl_disable_lun() for " "LUN %d", __func__, retval, params->lun_id); goto bailout_error; } /* * Set the waiting flag before we invalidate the LUN. Our shutdown * routine can be called any time after we invalidate the LUN, * and can be called from our context. * * This tells the shutdown routine that we're waiting, or we're * going to wait for the shutdown to happen. 
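(Editorial sketch of the whole handshake, since it spans three routines in this file:

	// _rm():           set WAITING, invalidate the LUN, msleep() until
	//                  UNCONFIGURED appears, then unlink and free it
	// _lun_shutdown(): set UNCONFIGURED; wakeup() if WAITING is set,
	//                  otherwise unlink and free the LUN itself
	// _lun_config_status(): same wakeup protocol on the create path,
	//                  with CONFIG_ERR marking a failed configuration

The WAITING bit decides which side frees the LUN; if the msleep() is interrupted, rm() backs off and leaves the cleanup to the shutdown hook.)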
*/ mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); retval = ctl_invalidate_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: error %d returned from ctl_invalidate_lun() for " "LUN %d", __func__, retval, params->lun_id); mtx_lock(&softc->lock); be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); goto bailout_error; } mtx_lock(&softc->lock); while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; /* * We only remove this LUN from the list and free it (below) if * retval == 0. If the user interrupted the wait, we just bail out * without actually freeing the LUN. We let the shutdown routine * free the LUN if that happens. */ if (retval == 0) { STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; } mtx_unlock(&softc->lock); if (retval == 0) { taskqueue_drain_all(be_lun->io_taskqueue); taskqueue_free(be_lun->io_taskqueue); ctl_free_opts(&be_lun->cbe_lun.options); + free(be_lun->zero_page, M_RAMDISK); + ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); + sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); free(be_lun, M_RAMDISK); } req->status = CTL_LUN_OK; return (retval); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_create_params *params; char *value; char tmpstr[32]; + uint64_t t; int retval; retval = 0; params = &req->reqdata.create; be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK); cbe_lun = &be_lun->cbe_lun; cbe_lun->be_lun = be_lun; be_lun->params = req->reqdata.create; be_lun->softc = softc; sprintf(be_lun->lunname, "cram%d", softc->num_luns); ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args); if (params->flags & CTL_LUN_FLAG_DEV_TYPE) cbe_lun->lun_type = params->device_type; else cbe_lun->lun_type = T_DIRECT; be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED; cbe_lun->flags = 0; value = ctl_get_opt(&cbe_lun->options, "ha_role"); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; + be_lun->pblocksize = PAGE_SIZE; + value = ctl_get_opt(&cbe_lun->options, "pblocksize"); + if (value != NULL) { + ctl_expand_number(value, &t); + be_lun->pblocksize = t; + } + if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) { + snprintf(req->error_str, sizeof(req->error_str), + "%s: unsupported pblocksize %u", __func__, + be_lun->pblocksize); + goto bailout_error; + } + if (cbe_lun->lun_type == T_DIRECT || cbe_lun->lun_type == T_CDROM) { if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; + be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize; + if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) { + snprintf(req->error_str, sizeof(req->error_str), + "%s: pblocksize %u not exp2 of blocksize %u", + __func__, + be_lun->pblocksize, cbe_lun->blocksize); + goto bailout_error; + } if (params->lun_size_bytes < cbe_lun->blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju 
< blocksize %u", __func__, params->lun_size_bytes, cbe_lun->blocksize); goto bailout_error; } be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize; be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize; + be_lun->indir = 0; + t = be_lun->size_bytes / be_lun->pblocksize; + while (t > 1) { + t /= PPP; + be_lun->indir++; + } cbe_lun->maxlba = be_lun->size_blocks - 1; - cbe_lun->atomicblock = UINT32_MAX; - cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize; + cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1; + cbe_lun->pblockoff = 0; + cbe_lun->ublockexp = cbe_lun->pblockexp; + cbe_lun->ublockoff = 0; + cbe_lun->atomicblock = be_lun->pblocksize; + cbe_lun->opttxferlen = SGPP * be_lun->pblocksize; + value = ctl_get_opt(&cbe_lun->options, "capacity"); + if (value != NULL) + ctl_expand_number(value, &be_lun->cap_bytes); + } else { + be_lun->pblockmul = 1; + cbe_lun->pblockexp = 0; } /* Tell the user the blocksize we ended up using */ params->blocksize_bytes = cbe_lun->blocksize; params->lun_size_bytes = be_lun->size_bytes; value = ctl_get_opt(&cbe_lun->options, "unmap"); - if (value != NULL && strcmp(value, "on") == 0) + if (value == NULL || strcmp(value, "off") != 0) cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; value = ctl_get_opt(&cbe_lun->options, "readonly"); if (value != NULL) { if (strcmp(value, "on") == 0) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; } else if (cbe_lun->lun_type != T_DIRECT) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; value = ctl_get_opt(&cbe_lun->options, "serseq"); if (value != NULL && strcmp(value, "on") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; else if (value != NULL && strcmp(value, "off") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; if (params->flags & CTL_LUN_FLAG_ID_REQ) { cbe_lun->req_lun_id = params->req_lun_id; cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; } else cbe_lun->req_lun_id = 0; cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown; cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status; cbe_lun->be = &ctl_be_ramdisk_driver; if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", softc->num_luns); strncpy((char *)cbe_lun->serial_num, tmpstr, MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); /* Tell the user what we used for a serial number */ strncpy((char *)params->serial_num, tmpstr, MIN(sizeof(params->serial_num), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->serial_num, params->serial_num, MIN(sizeof(cbe_lun->serial_num), sizeof(params->serial_num))); } if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); strncpy((char *)cbe_lun->device_id, tmpstr, MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); /* Tell the user what we used for a device ID */ strncpy((char *)params->device_id, tmpstr, MIN(sizeof(params->device_id), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->device_id, params->device_id, MIN(sizeof(cbe_lun->device_id), sizeof(params->device_id))); } STAILQ_INIT(&be_lun->cont_queue); + sx_init(&be_lun->page_lock, "cram page lock"); + if (be_lun->cap_bytes == 0) + be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK); + be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK, + M_WAITOK|M_ZERO); mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF); TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker, be_lun); be_lun->io_taskqueue = 
taskqueue_create(be_lun->lunname, M_WAITOK, taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); if (be_lun->io_taskqueue == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: Unable to create taskqueue", __func__); goto bailout_error; } retval = taskqueue_start_threads(&be_lun->io_taskqueue, /*num threads*/1, /*priority*/PWAIT, /*thread name*/ "%s taskq", be_lun->lunname); if (retval != 0) goto bailout_error; mtx_lock(&softc->lock); softc->num_luns++; STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); mtx_unlock(&softc->lock); retval = ctl_add_lun(&be_lun->cbe_lun); if (retval != 0) { mtx_lock(&softc->lock); STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; mtx_unlock(&softc->lock); snprintf(req->error_str, sizeof(req->error_str), "%s: ctl_add_lun() returned error %d, see dmesg for " "details", __func__, retval); retval = 0; goto bailout_error; } mtx_lock(&softc->lock); /* * Tell the config_status routine that we're waiting so it won't * clean up the LUN in the event of an error. */ be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN configuration error, see dmesg for details", __func__); STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; mtx_unlock(&softc->lock); goto bailout_error; } else { params->req_lun_id = cbe_lun->lun_id; } mtx_unlock(&softc->lock); req->status = CTL_LUN_OK; return (retval); bailout_error: req->status = CTL_LUN_ERROR; if (be_lun != NULL) { - if (be_lun->io_taskqueue != NULL) { + if (be_lun->io_taskqueue != NULL) taskqueue_free(be_lun->io_taskqueue); - } ctl_free_opts(&cbe_lun->options); + free(be_lun->zero_page, M_RAMDISK); + ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); + sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); free(be_lun, M_RAMDISK); } return (retval); } static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_modify_params *params; char *value; uint32_t blocksize; int wasprim; params = &req->reqdata.modify; mtx_lock(&softc->lock); STAILQ_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; if (params->lun_size_bytes != 0) be_lun->params.lun_size_bytes = params->lun_size_bytes; ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args); wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); value = ctl_get_opt(&cbe_lun->options, "ha_role"); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ctl_lun_primary(cbe_lun); else ctl_lun_secondary(cbe_lun); } blocksize = be_lun->cbe_lun.blocksize; if 
(be_lun->params.lun_size_bytes < blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju < blocksize %u", __func__, be_lun->params.lun_size_bytes, blocksize); goto bailout_error; } be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize; be_lun->size_bytes = be_lun->size_blocks * blocksize; be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1; ctl_lun_capacity_changed(&be_lun->cbe_lun); /* Tell the user the exact size we ended up using */ params->lun_size_bytes = be_lun->size_bytes; req->status = CTL_LUN_OK; return (0); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static void ctl_backend_ramdisk_lun_shutdown(void *be_lun) { struct ctl_be_ramdisk_lun *lun; struct ctl_be_ramdisk_softc *softc; int do_free; lun = (struct ctl_be_ramdisk_lun *)be_lun; softc = lun->softc; do_free = 0; mtx_lock(&softc->lock); lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED; if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) { wakeup(lun); } else { STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun, links); softc->num_luns--; do_free = 1; } mtx_unlock(&softc->lock); if (do_free != 0) free(be_lun, M_RAMDISK); } static void ctl_backend_ramdisk_lun_config_status(void *be_lun, ctl_lun_config_status status) { struct ctl_be_ramdisk_lun *lun; struct ctl_be_ramdisk_softc *softc; lun = (struct ctl_be_ramdisk_lun *)be_lun; softc = lun->softc; if (status == CTL_LUN_CONFIG_OK) { mtx_lock(&softc->lock); lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED; if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) wakeup(lun); mtx_unlock(&softc->lock); /* * We successfully added the LUN, attempt to enable it. */ if (ctl_enable_lun(&lun->cbe_lun) != 0) { printf("%s: ctl_enable_lun() failed!\n", __func__); if (ctl_invalidate_lun(&lun->cbe_lun) != 0) { printf("%s: ctl_invalidate_lun() failed!\n", __func__); } } return; } mtx_lock(&softc->lock); lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED; /* * If we have a user waiting, let him handle the cleanup. If not, * clean things up here. */ if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) { lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR; wakeup(lun); } else { STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun, links); softc->num_luns--; free(lun, M_RAMDISK); } mtx_unlock(&softc->lock); -} - -static int -ctl_backend_ramdisk_config_write(union ctl_io *io) -{ - struct ctl_be_lun *cbe_lun; - int retval; - - cbe_lun = CTL_BACKEND_LUN(io); - retval = 0; - switch (io->scsiio.cdb[0]) { - case SYNCHRONIZE_CACHE: - case SYNCHRONIZE_CACHE_16: - /* - * The upper level CTL code will filter out any CDBs with - * the immediate bit set and return the proper error. It - * will also not allow a sync cache command to go to a LUN - * that is powered down. - * - * We don't really need to worry about what LBA range the - * user asked to be synced out. When they issue a sync - * cache command, we'll sync out the whole thing. - * - * This is obviously just a stubbed out implementation. - * The real implementation will be in the RAIDCore/CTL - * interface, and can only really happen when RAIDCore - * implements a per-array cache sync. 
- */ - ctl_set_success(&io->scsiio); - ctl_config_write_done(io); - break; - case START_STOP_UNIT: { - struct scsi_start_stop_unit *cdb; - - cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; - if ((cdb->how & SSS_PC_MASK) != 0) { - ctl_set_success(&io->scsiio); - ctl_config_write_done(io); - break; - } - if (cdb->how & SSS_START) { - if (cdb->how & SSS_LOEJ) - ctl_lun_has_media(cbe_lun); - ctl_start_lun(cbe_lun); - } else { - ctl_stop_lun(cbe_lun); - if (cdb->how & SSS_LOEJ) - ctl_lun_ejected(cbe_lun); - } - ctl_set_success(&io->scsiio); - ctl_config_write_done(io); - break; - } - case PREVENT_ALLOW: - case WRITE_SAME_10: - case WRITE_SAME_16: - case UNMAP: - ctl_set_success(&io->scsiio); - ctl_config_write_done(io); - break; - default: - ctl_set_invalid_opcode(&io->scsiio); - ctl_config_write_done(io); - retval = CTL_RETVAL_COMPLETE; - break; - } - - return (retval); -} - -static int -ctl_backend_ramdisk_config_read(union ctl_io *io) -{ - int retval = 0; - - switch (io->scsiio.cdb[0]) { - case SERVICE_ACTION_IN: - if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { - /* We have nothing to tell, leave default data. */ - ctl_config_read_done(io); - retval = CTL_RETVAL_COMPLETE; - break; - } - ctl_set_invalid_field(&io->scsiio, - /*sks_valid*/ 1, - /*command*/ 1, - /*field*/ 1, - /*bit_valid*/ 1, - /*bit*/ 4); - ctl_config_read_done(io); - retval = CTL_RETVAL_COMPLETE; - break; - default: - ctl_set_invalid_opcode(&io->scsiio); - ctl_config_read_done(io); - retval = CTL_RETVAL_COMPLETE; - break; - } - - return (retval); } Index: projects/clang400-import/sys/cam/ctl/ctl_frontend.h =================================================================== --- projects/clang400-import/sys/cam/ctl/ctl_frontend.h (revision 312719) +++ projects/clang400-import/sys/cam/ctl/ctl_frontend.h (revision 312720) @@ -1,344 +1,336 @@ /*- * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.h#2 $ * $FreeBSD$ */ /* * CAM Target Layer front end registration hooks * * Author: Ken Merry */ #ifndef _CTL_FRONTEND_H_ #define _CTL_FRONTEND_H_ #include typedef enum { CTL_PORT_STATUS_NONE = 0x00, CTL_PORT_STATUS_ONLINE = 0x01, CTL_PORT_STATUS_HA_SHARED = 0x02 } ctl_port_status; typedef int (*fe_init_t)(void); typedef int (*fe_shutdown_t)(void); typedef void (*port_func_t)(void *onoff_arg); typedef int (*port_info_func_t)(void *onoff_arg, struct sbuf *sb); typedef int (*lun_func_t)(void *arg, int lun_id); typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); #define CTL_FRONTEND_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ return (ctl_frontend_register( \ (struct ctl_frontend *)data)); \ break; \ case MOD_UNLOAD: \ return (ctl_frontend_deregister( \ (struct ctl_frontend *)data)); \ break; \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \ MODULE_DEPEND(name, ctl, 1, 1, 1); \ MODULE_DEPEND(name, cam, 1, 1, 1) struct ctl_wwpn_iid { int in_use; time_t last_use; uint64_t wwpn; char *name; }; /* * The ctl_frontend structure is the registration mechanism between a FETD * (Front End Target Driver) and the CTL layer. Here is a description of * the fields: * * port_type: This field tells CTL what kind of front end it is * dealing with. This field serves two purposes. * The first is to let CTL know whether the frontend * in question is inside the main CTL module (i.e. * the ioctl front end), and therefore its module * reference count shouldn't be incremented. The * CTL ioctl front end should continue to use the * CTL_PORT_IOCTL argument as long as it is part of * the main CTL module. The second is to let CTL * know what kind of front end it is dealing with, so * it can return the proper inquiry data for that * particular port. * * num_requested_ctl_io: This is the number of ctl_io structures that the * front end needs for its pool. This should * generally be the maximum number of outstanding * transactions that the FETD can handle. The CTL * layer will add a few to this to account for * ctl_io buffers queued for pending sense data. * (Pending sense only gets queued if the FETD * doesn't support autosense. e.g. non-packetized * parallel SCSI doesn't support autosense.) * * port_name: A string describing the FETD. e.g. "LSI 1030T U320" * or whatever you want to use to describe the driver. * - * * physical_port: This is the physical port number of this * particular port within the driver/hardware. This * number is hardware/driver specific. * virtual_port: This is the virtual port number of this * particular port. This is for things like NP-IV. * * port_online(): This function is called, with onoff_arg as its * argument, by the CTL layer when it wants the FETD * to start responding to selections on the specified * target ID. * * port_offline(): This function is called, with onoff_arg as its * argument, by the CTL layer when it wants the FETD * to stop responding to selection on the specified * target ID. * * onoff_arg: This is supplied as an argument to port_online() * and port_offline(). This is specified by the * FETD. 
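(Editorial example: the smallest consumer of the declaration macro above, mirroring the camsim frontend later in this patch; the demo names are hypothetical:

	static int  demo_init(void)     { return (0); }
	static int  demo_shutdown(void) { return (0); }

	static struct ctl_frontend demo_frontend = {
		.name     = "demo",
		.init     = demo_init,
		.shutdown = demo_shutdown,
	};
	CTL_FRONTEND_DECLARE(ctldemo, demo_frontend);

The macro expands to the modevent handler that calls ctl_frontend_register() on MOD_LOAD and ctl_frontend_deregister() on MOD_UNLOAD, plus the DECLARE_MODULE() and MODULE_DEPEND() glue.)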
* * lun_enable(): This function is called, with targ_lun_arg, a target * ID and a LUN ID as its arguments, by CTL when it * wants the FETD to enable a particular LUN. If the * FETD doesn't really know about LUNs, it should * just ignore this call and return 0. If the FETD * cannot enable the requested LUN for some reason, the * FETD should return non-zero status. * * lun_disable(): This function is called, with targ_lun_arg, a target * ID and LUN ID as its arguments, by CTL when it * wants the FETD to disable a particular LUN. If the * FETD doesn't really know about LUNs, it should just * ignore this call and return 0. If the FETD cannot * disable the requested LUN for some reason, the * FETD should return non-zero status. * * targ_lun_arg: This is supplied as an argument to the targ/lun * enable/disable() functions. This is specified by * the FETD. * * fe_datamove(): This function is called one or more times per I/O * by the CTL layer to tell the FETD to initiate a * DMA to or from the data buffer(s) specified by * the passed-in ctl_io structure. * * fe_done(): This function is called by the CTL layer when a * particular SCSI I/O or task management command has * completed. For SCSI I/O requests (CTL_IO_SCSI), * sense data is always supplied if the status is * CTL_SCSI_ERROR and the SCSI status byte is * SCSI_STATUS_CHECK_COND. If the FETD doesn't * support autosense, the sense should be queued * back to the CTL layer via ctl_queue_sense(). * * fe_dump(): This function, if it exists, is called by CTL * to request a dump of any debugging information or * state to the console. * - * max_targets: The maximum number of targets that we can create - * per-port. - * - * max_target_id: The highest target ID that we can use. - * * targ_port: The CTL layer assigns a "port number" to every * FETD. This port number should be passed back in * in the header of every ctl_io that is queued to * the CTL layer. This enables us to determine * which bus the command came in on. * * ctl_pool_ref: Memory pool reference used by the FETD in calls to * ctl_alloc_io(). * * max_initiators: Maximum number of initiators that the FETD is * allowed to have. Initiators should be numbered * from 0 to max_initiators - 1. This value will * typically be 16, and thus not a problem for * parallel SCSI. This may present issues for Fibre * Channel. * * wwnn World Wide Node Name to be used by the FETD. * Note that this is set *after* registration. It * will be set prior to the online function getting * called. * * wwpn World Wide Port Name to be used by the FETD. * Note that this is set *after* registration. It * will be set prior to the online function getting * called. * * status: Used by CTL to keep track of per-FETD state. * * links: Linked list pointers, used by CTL. The FETD * shouldn't touch this field. 
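(Editorial example tying the fields above together: roughly the minimum a FETD fills in before registering, modeled on cfcs_init() later in this patch; hypothetical names, error handling elided:

	static struct ctl_port demo_port;

	static int demo_attach(void)
	{
		demo_port.frontend = &demo_frontend;
		demo_port.port_type = CTL_PORT_INTERNAL;
		demo_port.num_requested_ctl_io = 1024;  // sizes the ctl_io pool
		demo_port.port_name = demo_name;        // static name buffer
		demo_port.port_online = demo_online;    // selection on/off hooks
		demo_port.port_offline = demo_offline;
		demo_port.onoff_arg = &demo_port;
		demo_port.fe_datamove = demo_datamove;  // per-I/O callbacks
		demo_port.fe_done = demo_done;
		demo_port.targ_port = -1;               // CTL assigns the number
		return (ctl_port_register(&demo_port));
	}

On success CTL fills in targ_port and ctl_pool_ref; wwnn/wwpn are set before ctl_port_online() brings the port up.)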
*/ struct ctl_port { struct ctl_softc *ctl_softc; struct ctl_frontend *frontend; ctl_port_type port_type; /* passed to CTL */ int num_requested_ctl_io; /* passed to CTL */ char *port_name; /* passed to CTL */ int physical_port; /* passed to CTL */ int virtual_port; /* passed to CTL */ port_func_t port_online; /* passed to CTL */ port_func_t port_offline; /* passed to CTL */ port_info_func_t port_info; /* passed to CTL */ void *onoff_arg; /* passed to CTL */ lun_func_t lun_enable; /* passed to CTL */ lun_func_t lun_disable; /* passed to CTL */ int lun_map_size; /* passed to CTL */ uint32_t *lun_map; /* passed to CTL */ void *targ_lun_arg; /* passed to CTL */ void (*fe_datamove)(union ctl_io *io); /* passed to CTL */ void (*fe_done)(union ctl_io *io); /* passed to CTL */ - int max_targets; /* passed to CTL */ - int max_target_id; /* passed to CTL */ int32_t targ_port; /* passed back to FETD */ void *ctl_pool_ref; /* passed back to FETD */ uint32_t max_initiators; /* passed back to FETD */ struct ctl_wwpn_iid *wwpn_iid; /* used by CTL */ uint64_t wwnn; /* set by CTL before online */ uint64_t wwpn; /* set by CTL before online */ ctl_port_status status; /* used by CTL */ ctl_options_t options; /* passed to CTL */ struct ctl_devid *port_devid; /* passed to CTL */ struct ctl_devid *target_devid; /* passed to CTL */ struct ctl_devid *init_devid; /* passed to CTL */ struct ctl_io_stats stats; /* used by CTL */ struct mtx port_lock; /* used by CTL */ STAILQ_ENTRY(ctl_port) fe_links; /* used by CTL */ STAILQ_ENTRY(ctl_port) links; /* used by CTL */ }; struct ctl_frontend { char name[CTL_DRIVER_NAME_LEN]; /* passed to CTL */ fe_init_t init; /* passed to CTL */ fe_ioctl_t ioctl; /* passed to CTL */ void (*fe_dump)(void); /* passed to CTL */ fe_shutdown_t shutdown; /* passed to CTL */ STAILQ_HEAD(, ctl_port) port_list; /* used by CTL */ STAILQ_ENTRY(ctl_frontend) links; /* used by CTL */ }; /* * This may block until resources are allocated. Called at FETD module load * time. Returns 0 for success, non-zero for failure. */ int ctl_frontend_register(struct ctl_frontend *fe); /* * Called at FETD module unload time. * Returns 0 for success, non-zero for failure. */ int ctl_frontend_deregister(struct ctl_frontend *fe); /* * Find the frontend by its name. Returns NULL if not found. */ struct ctl_frontend * ctl_frontend_find(char *frontend_name); /* * This may block until resources are allocated. Called at FETD module load * time. Returns 0 for success, non-zero for failure. */ int ctl_port_register(struct ctl_port *port); /* * Called at FETD module unload time. * Returns 0 for success, non-zero for failure. */ int ctl_port_deregister(struct ctl_port *port); /* * Called to set the WWNN and WWPN for a particular frontend. */ void ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn, int wwpn_valid, uint64_t wwpn); /* * Called to bring a particular frontend online. */ void ctl_port_online(struct ctl_port *fe); /* * Called to take a particular frontend offline. */ void ctl_port_offline(struct ctl_port *fe); /* * This routine queues I/O and task management requests from the FETD to the * CTL layer. Returns immediately. Returns 0 for success, non-zero for * failure. */ int ctl_queue(union ctl_io *io); /* * This routine is used if the front end interface doesn't support * autosense (e.g. non-packetized parallel SCSI). This will queue the * scsiio structure back to a per-lun pending sense queue. 
This MUST be * called BEFORE any request sense can get queued to the CTL layer -- I * need it in the queue in order to service the request. The scsiio * structure passed in here will be freed by the CTL layer when sense is * retrieved by the initiator. Returns 0 for success, non-zero for failure. */ int ctl_queue_sense(union ctl_io *io); /* * This routine adds an initiator to CTL's port database. * The iid field should be the same as the iid passed in the nexus of each * ctl_io from this initiator. * The WWPN should be the FC WWPN, if available. */ int ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name); /* * This routine will remove an initiator from CTL's port database. * The iid field should be the same as the iid passed in the nexus of each * ctl_io from this initiator. */ int ctl_remove_initiator(struct ctl_port *port, int iid); #endif /* _CTL_FRONTEND_H_ */ Index: projects/clang400-import/sys/cam/ctl/ctl_frontend_cam_sim.c =================================================================== --- projects/clang400-import/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 312719) +++ projects/clang400-import/sys/cam/ctl/ctl_frontend_cam_sim.c (revision 312720) @@ -1,816 +1,811 @@ /*- * Copyright (c) 2009 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_cam_sim.c#4 $ */ /* * CTL frontend to CAM SIM interface. This allows access to CTL LUNs via * the da(4) and pass(4) drivers from inside the system. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define io_ptr spriv_ptr1 struct cfcs_io { union ccb *ccb; }; struct cfcs_softc { struct ctl_port port; char port_name[32]; struct cam_sim *sim; struct cam_devq *devq; struct cam_path *path; struct mtx lock; uint64_t wwnn; uint64_t wwpn; uint32_t cur_tag_num; int online; }; /* * We can't handle CCBs with these flags. For the most part, we just don't * handle physical addresses yet. 
That would require mapping things in * order to do the copy. */ #define CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_MSG_BUF_PHYS | \ CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \ CAM_SENSE_PHYS) static int cfcs_init(void); static int cfcs_shutdown(void); static void cfcs_poll(struct cam_sim *sim); static void cfcs_online(void *arg); static void cfcs_offline(void *arg); static void cfcs_datamove(union ctl_io *io); static void cfcs_done(union ctl_io *io); void cfcs_action(struct cam_sim *sim, union ccb *ccb); struct cfcs_softc cfcs_softc; /* * This is primarily intended to allow for error injection to test the CAM * sense data and sense residual handling code. This sets the maximum * amount of SCSI sense data that we will report to CAM. */ static int cfcs_max_sense = sizeof(struct scsi_sense_data); SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD, 0, "CAM Target Layer SIM frontend"); SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW, &cfcs_max_sense, 0, "Maximum sense data size"); static struct ctl_frontend cfcs_frontend = { .name = "camsim", .init = cfcs_init, .shutdown = cfcs_shutdown, }; CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend); static int cfcs_init(void) { struct cfcs_softc *softc; struct ctl_port *port; int retval; softc = &cfcs_softc; bzero(softc, sizeof(*softc)); mtx_init(&softc->lock, "ctl2cam", NULL, MTX_DEF); port = &softc->port; port->frontend = &cfcs_frontend; port->port_type = CTL_PORT_INTERNAL; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; snprintf(softc->port_name, sizeof(softc->port_name), "camsim"); port->port_name = softc->port_name; port->port_online = cfcs_online; port->port_offline = cfcs_offline; port->onoff_arg = softc; port->fe_datamove = cfcs_datamove; port->fe_done = cfcs_done; - - /* XXX KDM what should we report here? */ - /* XXX These should probably be fetched from CTL. */ - port->max_targets = 1; - port->max_target_id = 15; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with error %d!\n", __func__, retval); mtx_destroy(&softc->lock); return (retval); } /* * If the CTL frontend didn't tell us what our WWNN/WWPN is, go * ahead and set something random. 
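(Editorial note on the constants just below: the 0x5000000000000000 prefix makes the name NAA format 5, an IEEE-registered name, by our reading of the spec; the masked arc4rand() bits stand in for the OUI-plus-serial portion, and the WWPN is derived as WWNN + targ_port + 1 so node and port names stay distinct. Worked value with a fixed stand-in for the random bits:

	uint64_t r    = 0x0123456789abcdefULL;
	uint64_t wwnn = (r & 0x0000000fffffff00ULL) |
	    0x5000000000000000ULL | 0x0300;   // yields 0x5000000789abcf00

where the top nibble, 5, is the NAA field.)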
*/ if (port->wwnn == 0) { uint64_t random_bits; arc4rand(&random_bits, sizeof(random_bits), 0); softc->wwnn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000000000000000ULL | /* NL-Port */ 0x0300; softc->wwpn = softc->wwnn + port->targ_port + 1; ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn); } else { softc->wwnn = port->wwnn; softc->wwpn = port->wwpn; } mtx_lock(&softc->lock); softc->devq = cam_simq_alloc(port->num_requested_ctl_io); if (softc->devq == NULL) { printf("%s: error allocating devq\n", __func__); retval = ENOMEM; goto bailout; } softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name, softc, /*unit*/ 0, &softc->lock, 1, port->num_requested_ctl_io, softc->devq); if (softc->sim == NULL) { printf("%s: error allocating SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) { printf("%s: error registering SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_create_path(&softc->path, /*periph*/NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: error creating path\n", __func__); xpt_bus_deregister(cam_sim_path(softc->sim)); retval = EINVAL; goto bailout; } mtx_unlock(&softc->lock); return (retval); bailout: if (softc->sim) cam_sim_free(softc->sim, /*free_devq*/ TRUE); else if (softc->devq) cam_simq_free(softc->devq); mtx_unlock(&softc->lock); mtx_destroy(&softc->lock); return (retval); } static int cfcs_shutdown(void) { struct cfcs_softc *softc = &cfcs_softc; struct ctl_port *port = &softc->port; int error; ctl_port_offline(port); mtx_lock(&softc->lock); xpt_free_path(softc->path); xpt_bus_deregister(cam_sim_path(softc->sim)); cam_sim_free(softc->sim, /*free_devq*/ TRUE); mtx_unlock(&softc->lock); mtx_destroy(&softc->lock); if ((error = ctl_port_deregister(port)) != 0) printf("%s: cam_sim port deregistration failed\n", __func__); return (error); } static void cfcs_poll(struct cam_sim *sim) { } static void cfcs_onoffline(void *arg, int online) { struct cfcs_softc *softc; union ccb *ccb; softc = (struct cfcs_softc *)arg; mtx_lock(&softc->lock); softc->online = online; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("%s: unable to allocate CCB for rescan\n", __func__); goto bailout; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: can't allocate path for rescan\n", __func__); xpt_free_ccb(ccb); goto bailout; } xpt_rescan(ccb); bailout: mtx_unlock(&softc->lock); } static void cfcs_online(void *arg) { cfcs_onoffline(arg, /*online*/ 1); } static void cfcs_offline(void *arg) { cfcs_onoffline(arg, /*online*/ 0); } /* * This function is very similar to ctl_ioctl_do_datamove(). Is there a * way to combine the functionality? * * XXX KDM may need to move this into a thread. We're doing a bcopy in the * caller's context, which will usually be the backend. That may not be a * good thing. */ static void cfcs_datamove(union ctl_io *io) { union ccb *ccb; bus_dma_segment_t cam_sg_entry, *cam_sglist; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; int cam_sg_count, ctl_sg_count, cam_sg_start; int cam_sg_offset; int len_to_copy; int ctl_watermark, cam_watermark; int i, j; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; /* * Note that we have a check in cfcs_action() to make sure that any * CCBs with "bad" flags are returned with CAM_REQ_INVALID. 
This * is just to make sure no one removes that check without updating * this code to provide the additional functionality necessary to * support those modes of operation. */ KASSERT(((ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) == 0), ("invalid " "CAM flags %#x", (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS))); /* * Simplify things on both sides by putting single buffers into a * single entry S/G list. */ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { case CAM_DATA_SG: { int len_seen; cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; cam_sg_count = ccb->csio.sglist_cnt; cam_sg_start = cam_sg_count; cam_sg_offset = 0; for (i = 0, len_seen = 0; i < cam_sg_count; i++) { if ((len_seen + cam_sglist[i].ds_len) >= io->scsiio.kern_rel_offset) { cam_sg_start = i; cam_sg_offset = io->scsiio.kern_rel_offset - len_seen; break; } len_seen += cam_sglist[i].ds_len; } break; } case CAM_DATA_VADDR: cam_sglist = &cam_sg_entry; cam_sglist[0].ds_len = ccb->csio.dxfer_len; cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr; cam_sg_count = 1; cam_sg_start = 0; cam_sg_offset = io->scsiio.kern_rel_offset; break; default: panic("Invalid CAM flags %#x", ccb->ccb_h.flags); } if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } ctl_watermark = 0; cam_watermark = cam_sg_offset; for (i = cam_sg_start, j = 0; i < cam_sg_count && j < ctl_sg_count;) { uint8_t *cam_ptr, *ctl_ptr; len_to_copy = MIN(cam_sglist[i].ds_len - cam_watermark, ctl_sglist[j].len - ctl_watermark); cam_ptr = (uint8_t *)cam_sglist[i].ds_addr; cam_ptr = cam_ptr + cam_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else ctl_ptr = (uint8_t *)ctl_sglist[j].addr; ctl_ptr = ctl_ptr + ctl_watermark; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, ctl_ptr, cam_ptr)); bcopy(ctl_ptr, cam_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, cam_ptr, ctl_ptr)); bcopy(cam_ptr, ctl_ptr, len_to_copy); } io->scsiio.ext_data_filled += len_to_copy; io->scsiio.kern_data_resid -= len_to_copy; cam_watermark += len_to_copy; if (cam_sglist[i].ds_len == cam_watermark) { i++; cam_watermark = 0; } ctl_watermark += len_to_copy; if (ctl_sglist[j].len == ctl_watermark) { j++; ctl_watermark = 0; } } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(ccb); } io->scsiio.be_move_done(io); } static void cfcs_done(union ctl_io *io) { union ccb *ccb; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; if (ccb == NULL) { ctl_free_io(io); return; } /* * At this point we should have status. If we don't, that's a bug. */ KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); /* * Translate CTL status to CAM status.
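* Roughly: CTL_SUCCESS becomes CAM_REQ_CMP, CTL_SCSI_ERROR carries the SCSI
* status byte and autosense data across (trimmed to cfcs_max_sense),
* CTL_CMD_ABORTED becomes CAM_REQ_ABORTED, and anything else is reported as
* CAM_REQ_CMP_ERR. Any status other than CAM_REQ_CMP also freezes the devq.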
*/ if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (io->io_hdr.status & CTL_STATUS_MASK) { case CTL_SUCCESS: ccb->ccb_h.status |= CAM_REQ_CMP; break; case CTL_SCSI_ERROR: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; ccb->csio.scsi_status = io->scsiio.scsi_status; bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data, min(io->scsiio.sense_len, ccb->csio.sense_len)); if (ccb->csio.sense_len > io->scsiio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - io->scsiio.sense_len; else ccb->csio.sense_resid = 0; if ((ccb->csio.sense_len - ccb->csio.sense_resid) > cfcs_max_sense) { ccb->csio.sense_resid = ccb->csio.sense_len - cfcs_max_sense; } break; case CTL_CMD_ABORTED: ccb->ccb_h.status |= CAM_REQ_ABORTED; break; case CTL_ERROR: default: ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(ccb); ctl_free_io(io); } void cfcs_action(struct cam_sim *sim, union ccb *ccb) { struct cfcs_softc *softc; int err; softc = (struct cfcs_softc *)cam_sim_softc(sim); mtx_assert(&softc->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { union ctl_io *io; struct ccb_scsiio *csio; csio = &ccb->csio; /* * Catch CCB flags, like physical address flags, that * indicate situations we currently can't handle. */ if (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) { ccb->ccb_h.status = CAM_REQ_INVALID; printf("%s: bad CCB flags %#x (all flags %#x)\n", __func__, ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS, ccb->ccb_h.flags); xpt_done(ccb); return; } /* * If we aren't online, there are no devices to see. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { printf("%s: can't allocate ctl_io\n", __func__); ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down via the XPT_RESET_BUS/LUN CCBs below. */ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); /* * This tag scheme isn't the best, since we could in theory * have a very long-lived I/O and tag collision, especially * in a high I/O environment. But it should work well * enough for now. Since we're using unsigned ints, * they'll just wrap around. 
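* (With a 32-bit tag, a collision would require an I/O to stay outstanding
* while roughly four billion later commands are issued through this port.)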
*/ io->scsiio.tag_num = softc->cur_tag_num++; csio->tag_id = io->scsiio.tag_num; switch (csio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, csio->tag_action); break; } if (csio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, csio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(scsiio_cdb_ptr(csio), io->scsiio.cdb, io->scsiio.cdb_len); ccb->ccb_h.status |= CAM_SIM_QUEUED; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s: func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } break; } case XPT_ABORT: { union ctl_io *io; union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; if (abort_ccb->ccb_h.func_code != XPT_SCSI_IO) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * If we aren't online, there are no devices to talk to. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = abort_ccb->csio.tag_id; switch (abort_ccb->csio.tag_action) { case CAM_TAG_ACTION_NONE: io->taskio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->taskio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->taskio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->taskio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->taskio.tag_type = CTL_TAG_ACA; break; default: io->taskio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, abort_ccb->csio.tag_action); break; } err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_fc *fc; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 800000; fc->wwnn = softc->wwnn; fc->wwpn = softc->wwpn; fc->port = softc->port.targ_port; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: /* XXX KDM should we actually do something here?
*/ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_BUS: case XPT_RESET_DEV: { union ctl_io *io; /* * If we aren't online, there are no devices to talk to. */ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ if (ccb->ccb_h.func_code == XPT_RESET_DEV) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); if (ccb->ccb_h.func_code == XPT_RESET_BUS) io->taskio.task_action = CTL_TASK_BUS_RESET; else io->taskio.task_action = CTL_TASK_LUN_RESET; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 0; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_EXTLUNS; cpi->hba_eng_cnt = 0; cpi->max_target = 1; cpi->max_lun = 1024; /* Do we really have a limit? */ cpi->maxio = 1024 * 1024; cpi->async_flags = 0; cpi->hpath_id = 0; cpi->initiator_id = 0; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = 0; cpi->bus_id = 0; cpi->base_transfer_speed = 800000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; /* * Pretend to be Fibre Channel. */ cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = softc->wwnn; cpi->xport_specific.fc.wwpn = softc->wwpn; cpi->xport_specific.fc.port = softc->port.targ_port; cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_PROVIDE_FAIL; printf("%s: unsupported CCB type %#x\n", __func__, ccb->ccb_h.func_code); xpt_done(ccb); break; } } Index: projects/clang400-import/sys/cam/ctl/ctl_frontend_ioctl.c =================================================================== --- projects/clang400-import/sys/cam/ctl/ctl_frontend_ioctl.c (revision 312719) +++ projects/clang400-import/sys/cam/ctl/ctl_frontend_ioctl.c (revision 312720) @@ -1,436 +1,434 @@ /*- * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { CTL_IOCTL_INPROG, CTL_IOCTL_DATAMOVE, CTL_IOCTL_DONE } ctl_fe_ioctl_state; struct ctl_fe_ioctl_params { struct cv sem; struct mtx ioctl_mtx; ctl_fe_ioctl_state state; }; struct cfi_softc { uint32_t cur_tag_num; struct ctl_port port; }; static struct cfi_softc cfi_softc; static int cfi_init(void); static int cfi_shutdown(void); static void cfi_datamove(union ctl_io *io); static void cfi_done(union ctl_io *io); static struct ctl_frontend cfi_frontend = { .name = "ioctl", .init = cfi_init, .shutdown = cfi_shutdown, }; CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend); static int cfi_init(void) { struct cfi_softc *isoftc = &cfi_softc; struct ctl_port *port; int error = 0; memset(isoftc, 0, sizeof(*isoftc)); port = &isoftc->port; port->frontend = &cfi_frontend; port->port_type = CTL_PORT_IOCTL; port->num_requested_ctl_io = 100; port->port_name = "ioctl"; port->fe_datamove = cfi_datamove; port->fe_done = cfi_done; - port->max_targets = 1; - port->max_target_id = 0; port->targ_port = -1; port->max_initiators = 1; if ((error = ctl_port_register(port)) != 0) { printf("%s: ioctl port registration failed\n", __func__); return (error); } ctl_port_online(port); return (0); } static int cfi_shutdown(void) { struct cfi_softc *isoftc = &cfi_softc; struct ctl_port *port = &isoftc->port; int error = 0; ctl_port_offline(port); if ((error = ctl_port_deregister(port)) != 0) printf("%s: ioctl port deregistration failed\n", __func__); return (error); } /* * Data movement routine for the CTL ioctl frontend port. */ static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio) { struct ctl_sg_entry *ext_sglist, *kern_sglist; struct ctl_sg_entry ext_entry, kern_entry; int ext_sglen, ext_sg_entries, kern_sg_entries; int ext_sg_start, ext_offset; int len_to_copy; int kern_watermark, ext_watermark; int ext_sglist_malloced; int i, j; CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n")); /* * If this flag is set, fake the data transfer. */ if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) { ext_sglist_malloced = 0; ctsio->ext_data_filled += ctsio->kern_data_len; ctsio->kern_data_resid = 0; goto bailout; } /* * To simplify things here, if we have a single buffer, stick it in * a S/G entry and just make it a single entry S/G list. 
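* The copy loop below then only has to handle one case: two S/G lists walked
* in lockstep. ext_watermark and kern_watermark track how far into the
* current external and kernel entries the copy has progressed; each pass
* copies the smaller of the two remainders and advances whichever entry was
* exhausted.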
*/ if (ctsio->ext_sg_entries > 0) { int len_seen; ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist); ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL, M_WAITOK); ext_sglist_malloced = 1; if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) { ctsio->io_hdr.port_status = 31343; goto bailout; } ext_sg_entries = ctsio->ext_sg_entries; ext_sg_start = ext_sg_entries; ext_offset = 0; len_seen = 0; for (i = 0; i < ext_sg_entries; i++) { if ((len_seen + ext_sglist[i].len) >= ctsio->ext_data_filled) { ext_sg_start = i; ext_offset = ctsio->ext_data_filled - len_seen; break; } len_seen += ext_sglist[i].len; } } else { ext_sglist = &ext_entry; ext_sglist_malloced = 0; ext_sglist->addr = ctsio->ext_data_ptr; ext_sglist->len = ctsio->ext_data_len; ext_sg_entries = 1; ext_sg_start = 0; ext_offset = ctsio->ext_data_filled; } if (ctsio->kern_sg_entries > 0) { kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; kern_sg_entries = ctsio->kern_sg_entries; } else { kern_sglist = &kern_entry; kern_sglist->addr = ctsio->kern_data_ptr; kern_sglist->len = ctsio->kern_data_len; kern_sg_entries = 1; } kern_watermark = 0; ext_watermark = ext_offset; for (i = ext_sg_start, j = 0; i < ext_sg_entries && j < kern_sg_entries;) { uint8_t *ext_ptr, *kern_ptr; len_to_copy = MIN(ext_sglist[i].len - ext_watermark, kern_sglist[j].len - kern_watermark); ext_ptr = (uint8_t *)ext_sglist[i].addr; ext_ptr = ext_ptr + ext_watermark; if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else kern_ptr = (uint8_t *)kern_sglist[j].addr; kern_ptr = kern_ptr + kern_watermark; if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " "bytes to user\n", len_to_copy)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " "to %p\n", kern_ptr, ext_ptr)); if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) { ctsio->io_hdr.port_status = 31344; goto bailout; } } else { CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " "bytes from user\n", len_to_copy)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " "to %p\n", ext_ptr, kern_ptr)); if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){ ctsio->io_hdr.port_status = 31345; goto bailout; } } ctsio->ext_data_filled += len_to_copy; ctsio->kern_data_resid -= len_to_copy; ext_watermark += len_to_copy; if (ext_sglist[i].len == ext_watermark) { i++; ext_watermark = 0; } kern_watermark += len_to_copy; if (kern_sglist[j].len == kern_watermark) { j++; kern_watermark = 0; } } CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, " "kern_sg_entries: %d\n", ext_sg_entries, kern_sg_entries)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, " "kern_data_len = %d\n", ctsio->ext_data_len, ctsio->kern_data_len)); bailout: if (ext_sglist_malloced != 0) free(ext_sglist, M_CTL); return (CTL_RETVAL_COMPLETE); } static void cfi_datamove(union ctl_io *io) { struct ctl_fe_ioctl_params *params; params = (struct ctl_fe_ioctl_params *) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; mtx_lock(¶ms->ioctl_mtx); params->state = CTL_IOCTL_DATAMOVE; cv_broadcast(¶ms->sem); mtx_unlock(¶ms->ioctl_mtx); } static void cfi_done(union ctl_io *io) { struct ctl_fe_ioctl_params *params; params = (struct ctl_fe_ioctl_params *) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; mtx_lock(¶ms->ioctl_mtx); params->state = CTL_IOCTL_DONE; cv_broadcast(¶ms->sem); mtx_unlock(¶ms->ioctl_mtx); } static int 
cfi_submit_wait(union ctl_io *io) { struct ctl_fe_ioctl_params params; ctl_fe_ioctl_state last_state; int done, retval; bzero(¶ms, sizeof(params)); mtx_init(¶ms.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF); cv_init(¶ms.sem, "ctlioccv"); params.state = CTL_IOCTL_INPROG; last_state = params.state; io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ¶ms; CTL_DEBUG_PRINT(("cfi_submit_wait\n")); /* This shouldn't happen */ if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) return (retval); done = 0; do { mtx_lock(¶ms.ioctl_mtx); /* * Check the state here, and don't sleep if the state has * already changed (i.e. wakeup has already occurred, but we * weren't waiting yet). */ if (params.state == last_state) { /* XXX KDM cv_wait_sig instead? */ cv_wait(¶ms.sem, ¶ms.ioctl_mtx); } last_state = params.state; switch (params.state) { case CTL_IOCTL_INPROG: /* Why did we wake up? */ /* XXX KDM error here? */ mtx_unlock(¶ms.ioctl_mtx); break; case CTL_IOCTL_DATAMOVE: CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n")); /* * change last_state back to INPROG to avoid * deadlock on subsequent data moves. */ params.state = last_state = CTL_IOCTL_INPROG; mtx_unlock(¶ms.ioctl_mtx); ctl_ioctl_do_datamove(&io->scsiio); /* * Note that in some cases, most notably writes, * this will queue the I/O and call us back later. * In other cases, generally reads, this routine * will immediately call back and wake us up, * probably using our own context. */ io->scsiio.be_move_done(io); break; case CTL_IOCTL_DONE: mtx_unlock(¶ms.ioctl_mtx); CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n")); done = 1; break; default: mtx_unlock(¶ms.ioctl_mtx); /* XXX KDM error here? */ break; } } while (done == 0); mtx_destroy(¶ms.ioctl_mtx); cv_destroy(¶ms.sem); return (CTL_RETVAL_COMPLETE); } int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { union ctl_io *io; void *pool_tmp, *sc_tmp; int retval = 0; /* * If we haven't been "enabled", don't allow any SCSI I/O * to this FETD. */ if ((cfi_softc.port.status & CTL_PORT_STATUS_ONLINE) == 0) return (EPERM); io = ctl_alloc_io(cfi_softc.port.ctl_pool_ref); /* * Need to save the pool reference so it doesn't get * spammed by the user's ctl_io. */ pool_tmp = io->io_hdr.pool; sc_tmp = CTL_SOFTC(io); memcpy(io, (void *)addr, sizeof(*io)); io->io_hdr.pool = pool_tmp; CTL_SOFTC(io) = sc_tmp; /* * No status yet, so make sure the status is set properly. */ io->io_hdr.status = CTL_STATUS_NONE; /* * The user sets the initiator ID, target and LUN IDs. */ io->io_hdr.nexus.targ_port = cfi_softc.port.targ_port; io->io_hdr.flags |= CTL_FLAG_USER_REQ; if ((io->io_hdr.io_type == CTL_IO_SCSI) && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) io->scsiio.tag_num = cfi_softc.cur_tag_num++; retval = cfi_submit_wait(io); if (retval == 0) memcpy((void *)addr, io, sizeof(*io)); ctl_free_io(io); return (retval); } Index: projects/clang400-import/sys/cam/ctl/ctl_frontend_iscsi.c =================================================================== --- projects/clang400-import/sys/cam/ctl/ctl_frontend_iscsi.c (revision 312719) +++ projects/clang400-import/sys/cam/ctl/ctl_frontend_iscsi.c (revision 312720) @@ -1,3001 +1,2996 @@ /*- * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * CTL frontend for the iSCSI protocol. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ICL_KERNEL_PROXY #include #endif #ifdef ICL_KERNEL_PROXY FEATURE(cfiscsi_kernel_proxy, "iSCSI target built with ICL_KERNEL_PROXY"); #endif static MALLOC_DEFINE(M_CFISCSI, "cfiscsi", "Memory used for CTL iSCSI frontend"); static uma_zone_t cfiscsi_data_wait_zone; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0, "CAM Target Layer iSCSI Frontend"); static int debug = 1; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 1, "Enable debug messages"); static int ping_timeout = 5; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds"); static int login_timeout = 60; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds"); static int maxcmdsn_delta = 256; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN, &maxcmdsn_delta, 256, "Number of commands the initiator can send " "without confirmation"); #define CFISCSI_DEBUG(X, ...) \ do { \ if (debug > 1) { \ printf("%s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_WARN(X, ...) \ do { \ if (debug > 0) { \ printf("WARNING: %s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ printf("%s: %s (%s): " X "\n", \ __func__, S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_WARN(S, X, ...) 
\ do { \ if (debug > 0) { \ printf("WARNING: %s (%s): " X "\n", \ S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_LOCK(X) mtx_lock(&X->cs_lock) #define CFISCSI_SESSION_UNLOCK(X) mtx_unlock(&X->cs_lock) #define CFISCSI_SESSION_LOCK_ASSERT(X) mtx_assert(&X->cs_lock, MA_OWNED) #define CONN_SESSION(X) ((struct cfiscsi_session *)(X)->ic_prv0) #define PDU_SESSION(X) CONN_SESSION((X)->ip_conn) #define PDU_EXPDATASN(X) (X)->ip_prv0 #define PDU_TOTAL_TRANSFER_LEN(X) (X)->ip_prv1 #define PDU_R2TSN(X) (X)->ip_prv2 static int cfiscsi_init(void); static int cfiscsi_shutdown(void); static void cfiscsi_online(void *arg); static void cfiscsi_offline(void *arg); static int cfiscsi_info(void *arg, struct sbuf *sb); static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static void cfiscsi_datamove(union ctl_io *io); static void cfiscsi_datamove_in(union ctl_io *io); static void cfiscsi_datamove_out(union ctl_io *io); static void cfiscsi_done(union ctl_io *io); static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request); static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request); static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request); static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request); static void cfiscsi_session_terminate(struct cfiscsi_session *cs); static struct cfiscsi_data_wait *cfiscsi_data_wait_new( struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp); static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw); static struct cfiscsi_target *cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag); static struct cfiscsi_target *cfiscsi_target_find_or_create( struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag); static void cfiscsi_target_release(struct cfiscsi_target *ct); static void cfiscsi_session_delete(struct cfiscsi_session *cs); static struct cfiscsi_softc cfiscsi_softc; static struct ctl_frontend cfiscsi_frontend = { .name = "iscsi", .init = cfiscsi_init, .ioctl = cfiscsi_ioctl, .shutdown = cfiscsi_shutdown, }; CTL_FRONTEND_DECLARE(ctlcfiscsi, cfiscsi_frontend); MODULE_DEPEND(ctlcfiscsi, icl, 1, 1, 1); static struct icl_pdu * cfiscsi_pdu_new_response(struct icl_pdu *request, int flags) { return (icl_pdu_new(request->ip_conn, flags)); } static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request) { const struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; uint32_t cmdsn, expstatsn; cs = PDU_SESSION(request); /* * Every incoming PDU - not just NOP-Out - resets the ping timer. * The purpose of the timeout is to reset the connection when it stalls; * we don't want this to happen when NOP-In or NOP-Out ends up delayed * in some queue. * * XXX: Locking? */ cs->cs_timeout = 0; /* * Data-Out PDUs don't contain CmdSN. */ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) return (false); /* * We're only using fields common for all the request * (initiator -> target) PDUs. 
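* (For the opcodes that reach this point, CmdSN and ExpStatSN sit at the
* same BHS offsets, which is what makes this generic cast safe; Data-Out,
* which carries no CmdSN, was already filtered out above.)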
*/ bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; cmdsn = ntohl(bhssc->bhssc_cmdsn); expstatsn = ntohl(bhssc->bhssc_expstatsn); CFISCSI_SESSION_LOCK(cs); #if 0 if (expstatsn != cs->cs_statsn) { CFISCSI_SESSION_DEBUG(cs, "received PDU with ExpStatSN %d, " "while current StatSN is %d", expstatsn, cs->cs_statsn); } #endif if ((request->ip_bhs->bhs_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) { /* * The target MUST silently ignore any non-immediate command * outside of this range. */ if (ISCSI_SNLT(cmdsn, cs->cs_cmdsn) || ISCSI_SNGT(cmdsn, cs->cs_cmdsn + maxcmdsn_delta)) { CFISCSI_SESSION_UNLOCK(cs); CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u", cmdsn, cs->cs_cmdsn); return (true); } /* * We don't support multiple connections now, so any * discontinuity in CmdSN means lost PDUs. Since we don't * support PDU retransmission -- terminate the connection. */ if (cmdsn != cs->cs_cmdsn) { CFISCSI_SESSION_UNLOCK(cs); CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u; dropping connection", cmdsn, cs->cs_cmdsn); cfiscsi_session_terminate(cs); return (true); } cs->cs_cmdsn++; } CFISCSI_SESSION_UNLOCK(cs); return (false); } static void cfiscsi_pdu_handle(struct icl_pdu *request) { struct cfiscsi_session *cs; bool ignore; cs = PDU_SESSION(request); ignore = cfiscsi_pdu_update_cmdsn(request); if (ignore) { icl_pdu_free(request); return; } /* * Handle the PDU; this includes e.g. receiving the remaining * part of PDU and submitting the SCSI command to CTL * or queueing a reply. The handling routine is responsible * for freeing the PDU when it's no longer needed. */ switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_NOP_OUT: cfiscsi_pdu_handle_nop_out(request); break; case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_pdu_handle_scsi_command(request); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_pdu_handle_task_request(request); break; case ISCSI_BHS_OPCODE_SCSI_DATA_OUT: cfiscsi_pdu_handle_data_out(request); break; case ISCSI_BHS_OPCODE_LOGOUT_REQUEST: cfiscsi_pdu_handle_logout_request(request); break; default: CFISCSI_SESSION_WARN(cs, "received PDU with unsupported " "opcode 0x%x; dropping connection", request->ip_bhs->bhs_opcode); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_receive_callback(struct icl_pdu *request) { struct cfiscsi_session *cs; cs = PDU_SESSION(request); #ifdef ICL_KERNEL_PROXY if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (cs->cs_login_pdu == NULL) cs->cs_login_pdu = request; else icl_pdu_free(request); cv_signal(&cs->cs_login_cv); return; } #endif cfiscsi_pdu_handle(request); } static void cfiscsi_error_callback(struct icl_conn *ic) { struct cfiscsi_session *cs; cs = CONN_SESSION(ic); CFISCSI_SESSION_WARN(cs, "connection error; dropping connection"); cfiscsi_session_terminate(cs); } static int cfiscsi_pdu_prepare(struct icl_pdu *response) { struct cfiscsi_session *cs; struct iscsi_bhs_scsi_response *bhssr; bool advance_statsn = true; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK_ASSERT(cs); /* * We're only using fields common for all the response * (target -> initiator) PDUs. */ bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; /* * 10.8.3: "The StatSN for this connection is not advanced * after this PDU is sent." 
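* (The section numbers quoted in these comments refer to RFC 3720: 10.8 is
* R2T, 10.19 is NOP-In and 10.7 is SCSI Data-In.)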
*/ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_R2T) advance_statsn = false; /* * 10.19.2: "However, when the Initiator Task Tag is set to 0xffffffff, * StatSN for the connection is not advanced after this PDU is sent." */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_NOP_IN && bhssr->bhssr_initiator_task_tag == 0xffffffff) advance_statsn = false; /* * See the comment below - StatSN is not meaningful and must * not be advanced. */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_IN && (bhssr->bhssr_flags & BHSDI_FLAGS_S) == 0) advance_statsn = false; /* * 10.7.3: "The fields StatSN, Status, and Residual Count * only have meaningful content if the S bit is set to 1." */ if (bhssr->bhssr_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN || (bhssr->bhssr_flags & BHSDI_FLAGS_S)) bhssr->bhssr_statsn = htonl(cs->cs_statsn); bhssr->bhssr_expcmdsn = htonl(cs->cs_cmdsn); bhssr->bhssr_maxcmdsn = htonl(cs->cs_cmdsn + maxcmdsn_delta); if (advance_statsn) cs->cs_statsn++; return (0); } static void cfiscsi_pdu_queue(struct icl_pdu *response) { struct cfiscsi_session *cs; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK(cs); cfiscsi_pdu_prepare(response); icl_pdu_queue(response); CFISCSI_SESSION_UNLOCK(cs); } static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request) { struct cfiscsi_session *cs; struct iscsi_bhs_nop_out *bhsno; struct iscsi_bhs_nop_in *bhsni; struct icl_pdu *response; void *data = NULL; size_t datasize; int error; cs = PDU_SESSION(request); bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; if (bhsno->bhsno_initiator_task_tag == 0xffffffff) { /* * Nothing to do, cfiscsi_pdu_update_cmdsn() already * zeroed the timeout. */ icl_pdu_free(request); return; } datasize = icl_pdu_data_segment_length(request); if (datasize > 0) { data = malloc(datasize, M_CFISCSI, M_NOWAIT | M_ZERO); if (data == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } icl_pdu_get_data(request, 0, data, datasize); } response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = bhsno->bhsno_initiator_task_tag; bhsni->bhsni_target_transfer_tag = 0xffffffff; if (datasize > 0) { error = icl_pdu_append_data(response, data, datasize, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); icl_pdu_free(response); cfiscsi_session_terminate(cs); return; } free(data, M_CFISCSI); } icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request) { struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); if (request->ip_data_len > 0 && cs->cs_immediate_data == false) { CFISCSI_SESSION_WARN(cs, "unsolicited data with " "ImmediateData=No; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io);
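/* Hang the request PDU off the new I/O and fill in the nexus (initiator, target port, LUN) from session state and the BHS. */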
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhssc->bhssc_lun)); io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag; switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) { case BHSSC_FLAGS_ATTR_UNTAGGED: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case BHSSC_FLAGS_ATTR_SIMPLE: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case BHSSC_FLAGS_ATTR_ORDERED: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case BHSSC_FLAGS_ATTR_HOQ: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case BHSSC_FLAGS_ATTR_ACA: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; CFISCSI_SESSION_WARN(cs, "unhandled tag type %d", bhssc->bhssc_flags & BHSSC_FLAGS_ATTR); break; } io->scsiio.cdb_len = sizeof(bhssc->bhssc_cdb); /* Which is 16. */ memcpy(io->scsiio.cdb, bhssc->bhssc_cdb, sizeof(bhssc->bhssc_cdb)); refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request) { struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct icl_pdu *response; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhstmr->bhstmr_lun)); io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ switch (bhstmr->bhstmr_function & ~0x80) { case BHSTMR_FUNCTION_ABORT_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_ABORT_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case BHSTMR_FUNCTION_CLEAR_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_CLEAR_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET"); #endif io->taskio.task_action = CTL_TASK_LUN_RESET; break; case BHSTMR_FUNCTION_TARGET_WARM_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_WARM_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_TARGET_COLD_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_COLD_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_QUERY_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK"); #endif io->taskio.task_action = CTL_TASK_QUERY_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_QUERY_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK_SET"); #endif io->taskio.task_action 
= CTL_TASK_QUERY_TASK_SET; break; case BHSTMR_FUNCTION_I_T_NEXUS_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_I_T_NEXUS_RESET"); #endif io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; break; case BHSTMR_FUNCTION_QUERY_ASYNC_EVENT: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_ASYNC_EVENT"); #endif io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; default: CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x", bhstmr->bhstmr_function & ~0x80); ctl_free_io(io); response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); return; } refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static bool cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *cdw) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t copy_len, len, off, buffer_offset; int ctl_sg_count; union ctl_io *io; cs = PDU_SESSION(request); KASSERT((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT || (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bad opcode 0x%x", request->ip_bhs->bhs_opcode)); /* * We're only using fields common for Data-Out and SCSI Command PDUs. */ bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); #if 0 CFISCSI_SESSION_DEBUG(cs, "received %zd bytes out of %d", request->ip_data_len, io->scsiio.kern_total_len); #endif if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) buffer_offset = ntohl(bhsdo->bhsdo_buffer_offset); else buffer_offset = 0; len = icl_pdu_data_segment_length(request); /* * Make sure the offset, as sent by the initiator, matches the offset * we're supposed to be at in the scatter-gather list. */ if (buffer_offset > io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled || buffer_offset + len <= io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled) { CFISCSI_SESSION_WARN(cs, "received bad buffer offset %zd, " "expected %zd; dropping connection", buffer_offset, (size_t)io->scsiio.kern_rel_offset + (size_t)io->scsiio.ext_data_filled); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } /* * This is the offset within the PDU data segment, as opposed * to buffer_offset, which is the offset within the task (SCSI * command). 
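* For example, if this command's CTL buffer covers task bytes 4096..8191
* (kern_rel_offset 4096), 512 of them have already been filled, and the
* PDU's buffer_offset is 4096, then off = 4096 + 512 - 4096 = 512, i.e. the
* copy resumes 512 bytes into this PDU's data segment.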
*/ off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled - buffer_offset; /* * Iterate over the scatter/gather segments, filling them with data * from the PDU data segment. Note that this can get called multiple * times for one SCSI command; the cdw structure holds state for the * scatter/gather list. */ for (;;) { KASSERT(cdw->cdw_sg_index < ctl_sg_count, ("cdw->cdw_sg_index >= ctl_sg_count")); if (cdw->cdw_sg_len == 0) { cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; } KASSERT(off <= len, ("off > len")); copy_len = len - off; if (copy_len > cdw->cdw_sg_len) copy_len = cdw->cdw_sg_len; icl_pdu_get_data(request, off, cdw->cdw_sg_addr, copy_len); cdw->cdw_sg_addr += copy_len; cdw->cdw_sg_len -= copy_len; off += copy_len; io->scsiio.ext_data_filled += copy_len; io->scsiio.kern_data_resid -= copy_len; if (cdw->cdw_sg_len == 0) { /* * End of current segment. */ if (cdw->cdw_sg_index == ctl_sg_count - 1) { /* * Last segment in scatter/gather list. */ break; } cdw->cdw_sg_index++; } if (off == len) { /* * End of PDU payload. */ break; } } if (len > off) { /* * In case of unsolicited data, it's possible that the buffer * provided by CTL is smaller than negotiated FirstBurstLength. * Just ignore the superfluous data; will ask for them with R2T * on next call to cfiscsi_datamove(). * * This obviously can only happen with SCSI Command PDU. */ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND) return (true); CFISCSI_SESSION_WARN(cs, "received too much data: got %zd bytes, " "expected %zd; dropping connection", icl_pdu_data_segment_length(request), off); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) == 0) { CFISCSI_SESSION_WARN(cs, "got the final packet without " "the F flag; flags = 0x%x; dropping connection", bhsdo->bhsdo_flags); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled != cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) != 0) { if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) { CFISCSI_SESSION_WARN(cs, "got the final packet, but the " "transmitted size was %zd bytes instead of %d; " "dropping connection", (size_t)io->scsiio.ext_data_filled, cdw->cdw_r2t_end); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } else { /* * For SCSI Command PDU, this just means we need to * solicit more data by sending R2T.
*/ return (false); } } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end) { #if 0 CFISCSI_SESSION_DEBUG(cs, "no longer expecting Data-Out with target " "transfer tag 0x%x", cdw->cdw_target_transfer_tag); #endif return (true); } return (false); } static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct cfiscsi_data_wait *cdw = NULL; union ctl_io *io; bool done; cs = PDU_SESSION(request); bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) { #if 0 CFISCSI_SESSION_DEBUG(cs, "have ttt 0x%x, itt 0x%x; looking for " "ttt 0x%x, itt 0x%x", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag, cdw->cdw_target_transfer_tag, cdw->cdw_initiator_task_tag); #endif if (bhsdo->bhsdo_target_transfer_tag == cdw->cdw_target_transfer_tag) break; } CFISCSI_SESSION_UNLOCK(cs); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "data transfer tag 0x%x, initiator task tag " "0x%x, not found; dropping connection", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } if (cdw->cdw_datasn != ntohl(bhsdo->bhsdo_datasn)) { CFISCSI_SESSION_WARN(cs, "received Data-Out PDU with " "DataSN %u, while expected %u; dropping connection", ntohl(bhsdo->bhsdo_datasn), cdw->cdw_datasn); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } cdw->cdw_datasn++; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); done = cfiscsi_handle_data_segment(request, cdw); if (done) { CFISCSI_SESSION_LOCK(cs); TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end || io->scsiio.ext_data_filled == io->scsiio.kern_data_len); cfiscsi_data_wait_free(cs, cdw); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; if (done) io->scsiio.be_move_done(io); else cfiscsi_datamove_out(io); } icl_pdu_free(request); } static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request) { struct iscsi_bhs_logout_request *bhslr; struct iscsi_bhs_logout_response *bhslr2; struct icl_pdu *response; struct cfiscsi_session *cs; cs = PDU_SESSION(request); bhslr = (struct iscsi_bhs_logout_request *)request->ip_bhs; switch (bhslr->bhslr_reason & 0x7f) { case BHSLR_REASON_CLOSE_SESSION: case BHSLR_REASON_CLOSE_CONNECTION: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_DEBUG(cs, "failed to allocate memory"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_CLOSED_SUCCESSFULLY; bhslr2->bhslr_initiator_task_tag = bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); cfiscsi_session_terminate(cs); break; case BHSLR_REASON_REMOVE_FOR_RECOVERY: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_RECOVERY_NOT_SUPPORTED; bhslr2->bhslr_initiator_task_tag =
bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); break; default: CFISCSI_SESSION_WARN(cs, "invalid reason 0x%x; dropping connection", bhslr->bhslr_reason); icl_pdu_free(request); cfiscsi_session_terminate(cs); break; } } static void cfiscsi_callout(void *context) { struct icl_pdu *cp; struct iscsi_bhs_nop_in *bhsni; struct cfiscsi_session *cs; cs = context; if (cs->cs_terminating) return; callout_schedule(&cs->cs_callout, 1 * hz); atomic_add_int(&cs->cs_timeout, 1); #ifdef ICL_KERNEL_PROXY if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (login_timeout > 0 && cs->cs_timeout > login_timeout) { CFISCSI_SESSION_WARN(cs, "login timed out after " "%d seconds; dropping connection", cs->cs_timeout); cfiscsi_session_terminate(cs); } return; } #endif if (ping_timeout <= 0) { /* * Pings are disabled. Don't send NOP-In in this case; * user might have disabled pings to work around problems * with certain initiators that can't properly handle * NOP-In, such as iPXE. Reset the timeout, to avoid * triggering reconnection, should the user decide to * reenable them. */ cs->cs_timeout = 0; return; } if (cs->cs_timeout >= ping_timeout) { CFISCSI_SESSION_WARN(cs, "no ping reply (NOP-Out) after %d seconds; " "dropping connection", ping_timeout); cfiscsi_session_terminate(cs); return; } /* * If the ping was reset less than one second ago - which means * that we've received some PDU during the last second - assume * the traffic flows correctly and don't bother sending a NOP-In. * * (It's 2 - one for one second, and one for incrementing cs_timeout * earlier in this routine.) */ if (cs->cs_timeout < 2) return; cp = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (cp == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory"); return; } bhsni = (struct iscsi_bhs_nop_in *)cp->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = 0xffffffff; cfiscsi_pdu_queue(cp); } static struct cfiscsi_data_wait * cfiscsi_data_wait_new(struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp) { struct cfiscsi_data_wait *cdw; int error; cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate %zd bytes", sizeof(*cdw)); return (NULL); } error = icl_conn_transfer_setup(cs->cs_conn, io, target_transfer_tagp, &cdw->cdw_icl_prv); if (error != 0) { CFISCSI_SESSION_WARN(cs, "icl_conn_transfer_setup() failed with error %d", error); uma_zfree(cfiscsi_data_wait_zone, cdw); return (NULL); } cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = *target_transfer_tagp; cdw->cdw_initiator_task_tag = initiator_task_tag; return (cdw); } static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw) { icl_conn_transfer_done(cs->cs_conn, cdw->cdw_icl_prv); uma_zfree(cfiscsi_data_wait_zone, cdw); } static void cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs) { struct cfiscsi_data_wait *cdw; union ctl_io *io; int error, last, wait; if (cs->cs_target == NULL) return; /* No target yet, so nothing to do.
*/ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = 0; io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; wait = cs->cs_outstanding_ctl_pdus; refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error); refcount_release(&cs->cs_outstanding_ctl_pdus); ctl_free_io(io); } CFISCSI_SESSION_LOCK(cs); while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) { TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * Set nonzero port status; this prevents backends from * assuming that the data transfer actually succeeded * and writing uninitialized data to disk. */ cdw->cdw_ctl_io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42; cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); cfiscsi_data_wait_free(cs, cdw); CFISCSI_SESSION_LOCK(cs); } CFISCSI_SESSION_UNLOCK(cs); /* * Wait for CTL to terminate all the tasks. */ if (wait > 0) CFISCSI_SESSION_WARN(cs, "waiting for CTL to terminate %d tasks", wait); for (;;) { refcount_acquire(&cs->cs_outstanding_ctl_pdus); last = refcount_release(&cs->cs_outstanding_ctl_pdus); if (last != 0) break; tsleep(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus), 0, "cfiscsi_terminate", hz / 100); } if (wait > 0) CFISCSI_SESSION_WARN(cs, "tasks terminated"); } static void cfiscsi_maintenance_thread(void *arg) { struct cfiscsi_session *cs; cs = arg; for (;;) { CFISCSI_SESSION_LOCK(cs); if (cs->cs_terminating == false) cv_wait(&cs->cs_maintenance_cv, &cs->cs_lock); CFISCSI_SESSION_UNLOCK(cs); if (cs->cs_terminating) { /* * We used to wait up to 30 seconds to deliver queued * PDUs to the initiator. We also tried hard to deliver * SCSI Responses for the aborted PDUs. We don't do * that anymore. We might need to revisit that. */ callout_drain(&cs->cs_callout); icl_conn_close(cs->cs_conn); /* * At this point ICL receive thread is no longer * running; no new tasks can be queued. 
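* cfiscsi_session_terminate_tasks() below queues an I_T NEXUS RESET, fails
* any pending Data-Out waiters with a nonzero port status, and then blocks
* until the outstanding CTL PDU refcount drains to zero.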
*/ cfiscsi_session_terminate_tasks(cs); cfiscsi_session_delete(cs); kthread_exit(); return; } CFISCSI_SESSION_DEBUG(cs, "nothing to do"); } } static void cfiscsi_session_terminate(struct cfiscsi_session *cs) { if (cs->cs_terminating) return; cs->cs_terminating = true; cv_signal(&cs->cs_maintenance_cv); #ifdef ICL_KERNEL_PROXY cv_signal(&cs->cs_login_cv); #endif } static int cfiscsi_session_register_initiator(struct cfiscsi_session *cs) { struct cfiscsi_target *ct; char *name; int i; KASSERT(cs->cs_ctl_initid == -1, ("already registered")); ct = cs->cs_target; name = strdup(cs->cs_initiator_id, M_CTL); i = ctl_add_initiator(&ct->ct_port, -1, 0, name); if (i < 0) { CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d", i); cs->cs_ctl_initid = -1; return (1); } cs->cs_ctl_initid = i; #if 0 CFISCSI_SESSION_DEBUG(cs, "added initiator id %d", i); #endif return (0); } static void cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs) { int error; if (cs->cs_ctl_initid == -1) return; error = ctl_remove_initiator(&cs->cs_target->ct_port, cs->cs_ctl_initid); if (error != 0) { CFISCSI_SESSION_WARN(cs, "ctl_remove_initiator failed with error %d", error); } cs->cs_ctl_initid = -1; } static struct cfiscsi_session * cfiscsi_session_new(struct cfiscsi_softc *softc, const char *offload) { struct cfiscsi_session *cs; int error; cs = malloc(sizeof(*cs), M_CFISCSI, M_NOWAIT | M_ZERO); if (cs == NULL) { CFISCSI_WARN("malloc failed"); return (NULL); } cs->cs_ctl_initid = -1; refcount_init(&cs->cs_outstanding_ctl_pdus, 0); TAILQ_INIT(&cs->cs_waiting_for_data_out); mtx_init(&cs->cs_lock, "cfiscsi_lock", NULL, MTX_DEF); cv_init(&cs->cs_maintenance_cv, "cfiscsi_mt"); #ifdef ICL_KERNEL_PROXY cv_init(&cs->cs_login_cv, "cfiscsi_login"); #endif cs->cs_conn = icl_new_conn(offload, false, "cfiscsi", &cs->cs_lock); if (cs->cs_conn == NULL) { free(cs, M_CFISCSI); return (NULL); } cs->cs_conn->ic_receive = cfiscsi_receive_callback; cs->cs_conn->ic_error = cfiscsi_error_callback; cs->cs_conn->ic_prv0 = cs; error = kthread_add(cfiscsi_maintenance_thread, cs, NULL, NULL, 0, 0, "cfiscsimt"); if (error != 0) { CFISCSI_SESSION_WARN(cs, "kthread_add(9) failed with error %d", error); free(cs, M_CFISCSI); return (NULL); } mtx_lock(&softc->lock); cs->cs_id = ++softc->last_session_id; TAILQ_INSERT_TAIL(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); /* * Start pinging the initiator. 
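* The callout below fires once a second; cfiscsi_callout() increments
* cs_timeout on every tick and any received PDU resets it to zero, so an
* idle connection is dropped after ping_timeout seconds without a NOP-Out
* reply.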
*/ callout_init(&cs->cs_callout, 1); callout_reset(&cs->cs_callout, 1 * hz, cfiscsi_callout, cs); return (cs); } static void cfiscsi_session_delete(struct cfiscsi_session *cs) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; KASSERT(cs->cs_outstanding_ctl_pdus == 0, ("destroying session with outstanding CTL pdus")); KASSERT(TAILQ_EMPTY(&cs->cs_waiting_for_data_out), ("destroying session with non-empty queue")); cfiscsi_session_unregister_initiator(cs); if (cs->cs_target != NULL) cfiscsi_target_release(cs->cs_target); icl_conn_close(cs->cs_conn); icl_conn_free(cs->cs_conn); mtx_lock(&softc->lock); TAILQ_REMOVE(&softc->sessions, cs, cs_next); cv_signal(&softc->sessions_cv); mtx_unlock(&softc->lock); free(cs, M_CFISCSI); } static int cfiscsi_init(void) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; bzero(softc, sizeof(*softc)); mtx_init(&softc->lock, "cfiscsi", NULL, MTX_DEF); cv_init(&softc->sessions_cv, "cfiscsi_sessions"); #ifdef ICL_KERNEL_PROXY cv_init(&softc->accept_cv, "cfiscsi_accept"); #endif TAILQ_INIT(&softc->sessions); TAILQ_INIT(&softc->targets); cfiscsi_data_wait_zone = uma_zcreate("cfiscsi_data_wait", sizeof(struct cfiscsi_data_wait), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); return (0); } static int cfiscsi_shutdown(void) { struct cfiscsi_softc *softc = &cfiscsi_softc; if (!TAILQ_EMPTY(&softc->sessions) || !TAILQ_EMPTY(&softc->targets)) return (EBUSY); uma_zdestroy(cfiscsi_data_wait_zone); #ifdef ICL_KERNEL_PROXY cv_destroy(&softc->accept_cv); #endif cv_destroy(&softc->sessions_cv); mtx_destroy(&softc->lock); return (0); } #ifdef ICL_KERNEL_PROXY static void cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id) { struct cfiscsi_session *cs; cs = cfiscsi_session_new(&cfiscsi_softc, NULL); if (cs == NULL) { CFISCSI_WARN("failed to create session"); return; } icl_conn_handoff_sock(cs->cs_conn, so); cs->cs_initiator_sa = sa; cs->cs_portal_id = portal_id; cs->cs_waiting_for_ctld = true; cv_signal(&cfiscsi_softc.accept_cv); } #endif static void cfiscsi_online(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 1; online = softc->online++; mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY if (softc->listener != NULL) icl_listen_free(softc->listener); softc->listener = icl_listen_new(cfiscsi_accept); #endif } static void cfiscsi_offline(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; struct cfiscsi_session *cs; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (!ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 0; online = --softc->online; TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) cfiscsi_session_terminate(cs); } do { TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) break; } if (cs != NULL) cv_wait(&softc->sessions_cv, &softc->lock); } while (cs != NULL && ct->ct_online == 0); mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY icl_listen_free(softc->listener); softc->listener = NULL; #endif } static int cfiscsi_info(void *arg, struct sbuf *sb) { struct cfiscsi_target *ct = (struct cfiscsi_target *)arg; int retval; retval = sbuf_printf(sb, "\t%d\n", ct->ct_state); return (retval); } static void cfiscsi_ioctl_handoff(struct ctl_iscsi *ci) { struct cfiscsi_softc *softc; struct cfiscsi_session 
*cs, *cs2; struct cfiscsi_target *ct; struct ctl_iscsi_handoff_params *cihp; int error; cihp = (struct ctl_iscsi_handoff_params *)&(ci->data); softc = &cfiscsi_softc; CFISCSI_DEBUG("new connection from %s (%s) to %s", cihp->initiator_name, cihp->initiator_addr, cihp->target_name); ct = cfiscsi_target_find(softc, cihp->target_name, cihp->portal_group_tag); if (ct == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: target not found", __func__); return; } #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0 && cihp->connection_id > 0) { snprintf(ci->error_str, sizeof(ci->error_str), "both socket and connection_id set"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } if (cihp->socket == 0) { mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cihp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } mtx_unlock(&cfiscsi_softc.lock); } else { #endif cs = cfiscsi_session_new(softc, cihp->offload); if (cs == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: cfiscsi_session_new failed", __func__); cfiscsi_target_release(ct); return; } #ifdef ICL_KERNEL_PROXY } #endif /* * The first PDU of the Full Feature phase carries the same CmdSN as the last * PDU from the Login Phase received from the initiator. */ cs->cs_cmdsn = cihp->cmdsn; cs->cs_statsn = cihp->statsn; cs->cs_max_recv_data_segment_length = cihp->max_recv_data_segment_length; cs->cs_max_send_data_segment_length = cihp->max_send_data_segment_length; cs->cs_max_burst_length = cihp->max_burst_length; cs->cs_first_burst_length = cihp->first_burst_length; cs->cs_immediate_data = !!cihp->immediate_data; if (cihp->header_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_header_crc32c = true; if (cihp->data_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_data_crc32c = true; strlcpy(cs->cs_initiator_name, cihp->initiator_name, sizeof(cs->cs_initiator_name)); strlcpy(cs->cs_initiator_addr, cihp->initiator_addr, sizeof(cs->cs_initiator_addr)); strlcpy(cs->cs_initiator_alias, cihp->initiator_alias, sizeof(cs->cs_initiator_alias)); memcpy(cs->cs_initiator_isid, cihp->initiator_isid, sizeof(cs->cs_initiator_isid)); snprintf(cs->cs_initiator_id, sizeof(cs->cs_initiator_id), "%s,i,0x%02x%02x%02x%02x%02x%02x", cs->cs_initiator_name, cihp->initiator_isid[0], cihp->initiator_isid[1], cihp->initiator_isid[2], cihp->initiator_isid[3], cihp->initiator_isid[4], cihp->initiator_isid[5]); mtx_lock(&softc->lock); if (ct->ct_online == 0) { mtx_unlock(&softc->lock); cfiscsi_session_terminate(cs); cfiscsi_target_release(ct); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: port offline", __func__); return; } cs->cs_target = ct; mtx_unlock(&softc->lock); refcount_acquire(&cs->cs_outstanding_ctl_pdus); restart: if (!cs->cs_terminating) { mtx_lock(&softc->lock); TAILQ_FOREACH(cs2, &softc->sessions, cs_next) { if (cs2 != cs && cs2->cs_tasks_aborted == false && cs->cs_target == cs2->cs_target && strcmp(cs->cs_initiator_id, cs2->cs_initiator_id) == 0) { if (strcmp(cs->cs_initiator_addr, cs2->cs_initiator_addr) != 0) { CFISCSI_SESSION_WARN(cs2, "session reinstatement from " "different address %s", cs->cs_initiator_addr); } else { CFISCSI_SESSION_DEBUG(cs2, "session reinstatement"); } cfiscsi_session_terminate(cs2);
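/* Drop the lock and give the old session a moment to terminate, then rescan. */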
mtx_unlock(&softc->lock); pause("cfiscsi_reinstate", 1); goto restart; } } mtx_unlock(&softc->lock); } /* * Register initiator with CTL. */ cfiscsi_session_register_initiator(cs); #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0) { #endif error = icl_conn_handoff(cs->cs_conn, cihp->socket); if (error != 0) { cfiscsi_session_terminate(cs); refcount_release(&cs->cs_outstanding_ctl_pdus); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_conn_handoff failed with error %d", __func__, error); return; } #ifdef ICL_KERNEL_PROXY } #endif #ifdef ICL_KERNEL_PROXY cs->cs_login_phase = false; /* * First PDU of the Full Feature phase has likely already arrived. * We have to pick it up and execute properly. */ if (cs->cs_login_pdu != NULL) { CFISCSI_SESSION_DEBUG(cs, "picking up first PDU"); cfiscsi_pdu_handle(cs->cs_login_pdu); cs->cs_login_pdu = NULL; } #endif refcount_release(&cs->cs_outstanding_ctl_pdus); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_list(struct ctl_iscsi *ci) { struct ctl_iscsi_list_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; struct sbuf *sb; int error; cilp = (struct ctl_iscsi_list_params *)&(ci->data); softc = &cfiscsi_softc; sb = sbuf_new(NULL, NULL, cilp->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate %d bytes for iSCSI session list", cilp->alloc_len); return; } sbuf_printf(sb, "\n"); mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { #ifdef ICL_KERNEL_PROXY if (cs->cs_target == NULL) continue; #endif error = sbuf_printf(sb, "" "%s" "%s" "%s" "%s" "%s" "%u" "%s" "%s" "%d" "%d" "%d" "%d" "%d" "%d" "%s" "\n", cs->cs_id, cs->cs_initiator_name, cs->cs_initiator_addr, cs->cs_initiator_alias, cs->cs_target->ct_name, cs->cs_target->ct_alias, cs->cs_target->ct_tag, cs->cs_conn->ic_header_crc32c ? "CRC32C" : "None", cs->cs_conn->ic_data_crc32c ? 
"CRC32C" : "None", cs->cs_max_recv_data_segment_length, cs->cs_max_send_data_segment_length, cs->cs_max_burst_length, cs->cs_first_burst_length, cs->cs_immediate_data, cs->cs_conn->ic_iser, cs->cs_conn->ic_offload); if (error != 0) break; } mtx_unlock(&softc->lock); error = sbuf_printf(sb, "\n"); if (error != 0) { sbuf_delete(sb); ci->status = CTL_ISCSI_LIST_NEED_MORE_SPACE; snprintf(ci->error_str, sizeof(ci->error_str), "Out of space, %d bytes is too small", cilp->alloc_len); return; } sbuf_finish(sb); error = copyout(sbuf_data(sb), cilp->conn_xml, sbuf_len(sb) + 1); cilp->fill_len = sbuf_len(sb) + 1; ci->status = CTL_ISCSI_OK; sbuf_delete(sb); } static void cfiscsi_ioctl_logout(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_logout_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; cilp = (struct ctl_iscsi_logout_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cilp->all == 0 && cs->cs_id != cilp->connection_id && strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate memory"); mtx_unlock(&softc->lock); return; } bhsam = (struct iscsi_bhs_asynchronous_message *)response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT; bhsam->bhsam_parameter3 = htons(10); cfiscsi_pdu_queue(response); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_terminate(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_terminate_params *citp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; citp = (struct ctl_iscsi_terminate_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (citp->all == 0 && cs->cs_id != citp->connection_id && strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { /* * Oh well. Just terminate the connection. 
*/ } else { bhsam = (struct iscsi_bhs_asynchronous_message *) response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_0xffffffff = 0xffffffff; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_TERMINATES_SESSION; cfiscsi_pdu_queue(response); } cfiscsi_session_terminate(cs); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_limits(struct ctl_iscsi *ci) { struct ctl_iscsi_limits_params *cilp; struct icl_drv_limits idl; int error; cilp = (struct ctl_iscsi_limits_params *)&(ci->data); error = icl_limits(cilp->offload, false, &idl); if (error != 0) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_limits failed with error %d", __func__, error); return; } cilp->max_recv_data_segment_length = idl.idl_max_recv_data_segment_length; cilp->max_send_data_segment_length = idl.idl_max_send_data_segment_length; cilp->max_burst_length = idl.idl_max_burst_length; cilp->first_burst_length = idl.idl_first_burst_length; ci->status = CTL_ISCSI_OK; } #ifdef ICL_KERNEL_PROXY static void cfiscsi_ioctl_listen(struct ctl_iscsi *ci) { struct ctl_iscsi_listen_params *cilp; struct sockaddr *sa; int error; cilp = (struct ctl_iscsi_listen_params *)&(ci->data); if (cfiscsi_softc.listener == NULL) { CFISCSI_DEBUG("no listener"); snprintf(ci->error_str, sizeof(ci->error_str), "no listener"); ci->status = CTL_ISCSI_ERROR; return; } error = getsockaddr(&sa, (void *)cilp->addr, cilp->addrlen); if (error != 0) { CFISCSI_DEBUG("getsockaddr, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "getsockaddr failed"); ci->status = CTL_ISCSI_ERROR; return; } error = icl_listen_add(cfiscsi_softc.listener, cilp->iser, cilp->domain, cilp->socktype, cilp->protocol, sa, cilp->portal_id); if (error != 0) { free(sa, M_SONAME); CFISCSI_DEBUG("icl_listen_add, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "icl_listen_add failed, error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_accept(struct ctl_iscsi *ci) { struct ctl_iscsi_accept_params *ciap; struct cfiscsi_session *cs; int error; ciap = (struct ctl_iscsi_accept_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); for (;;) { TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_waiting_for_ctld) break; } if (cs != NULL) break; error = cv_wait_sig(&cfiscsi_softc.accept_cv, &cfiscsi_softc.lock); if (error != 0) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted"); ci->status = CTL_ISCSI_ERROR; return; } } mtx_unlock(&cfiscsi_softc.lock); cs->cs_waiting_for_ctld = false; cs->cs_login_phase = true; ciap->connection_id = cs->cs_id; ciap->portal_id = cs->cs_portal_id; ciap->initiator_addrlen = cs->cs_initiator_sa->sa_len; error = copyout(cs->cs_initiator_sa, ciap->initiator_addr, cs->cs_initiator_sa->sa_len); if (error != 0) { snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_send(struct ctl_iscsi *ci) { struct ctl_iscsi_send_params *cisp; struct cfiscsi_session *cs; struct icl_pdu *ip; size_t datalen; void *data; int error; cisp = (struct ctl_iscsi_send_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, 
&cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cisp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (cs->cs_login_phase == false) return (EBUSY); #endif if (cs->cs_terminating) { snprintf(ci->error_str, sizeof(ci->error_str), "connection is terminating"); ci->status = CTL_ISCSI_ERROR; return; } datalen = cisp->data_segment_len; /* * XXX */ //if (datalen > CFISCSI_MAX_DATA_SEGMENT_LENGTH) { if (datalen > 65535) { snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } if (datalen > 0) { data = malloc(datalen, M_CFISCSI, M_WAITOK); error = copyin(cisp->data_segment, data, datalen); if (error != 0) { free(data, M_CFISCSI); snprintf(ci->error_str, sizeof(ci->error_str), "copyin error %d", error); ci->status = CTL_ISCSI_ERROR; return; } } ip = icl_pdu_new(cs->cs_conn, M_WAITOK); memcpy(ip->ip_bhs, cisp->bhs, sizeof(*ip->ip_bhs)); if (datalen > 0) { icl_pdu_append_data(ip, data, datalen, M_WAITOK); free(data, M_CFISCSI); } CFISCSI_SESSION_LOCK(cs); icl_pdu_queue(ip); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_receive(struct ctl_iscsi *ci) { struct ctl_iscsi_receive_params *cirp; struct cfiscsi_session *cs; struct icl_pdu *ip; void *data; int error; cirp = (struct ctl_iscsi_receive_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cirp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (is->is_login_phase == false) return (EBUSY); #endif CFISCSI_SESSION_LOCK(cs); while (cs->cs_login_pdu == NULL && cs->cs_terminating == false) { error = cv_wait_sig(&cs->cs_login_cv, &cs->cs_lock); if (error != 0) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted by signal"); ci->status = CTL_ISCSI_ERROR; return; } } if (cs->cs_terminating) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "connection terminating"); ci->status = CTL_ISCSI_ERROR; return; } ip = cs->cs_login_pdu; cs->cs_login_pdu = NULL; CFISCSI_SESSION_UNLOCK(cs); if (ip->ip_data_len > cirp->data_segment_len) { icl_pdu_free(ip); snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } copyout(ip->ip_bhs, cirp->bhs, sizeof(*ip->ip_bhs)); if (ip->ip_data_len > 0) { data = malloc(ip->ip_data_len, M_CFISCSI, M_WAITOK); icl_pdu_get_data(ip, 0, data, ip->ip_data_len); copyout(data, cirp->data_segment, ip->ip_data_len); free(data, M_CFISCSI); } icl_pdu_free(ip); ci->status = CTL_ISCSI_OK; } #endif /* !ICL_KERNEL_PROXY */ static void cfiscsi_ioctl_port_create(struct ctl_req *req) { struct cfiscsi_target *ct; struct ctl_port *port; const char *target, *alias, *tags; struct scsi_vpd_id_descriptor *desc; ctl_options_t opts; int retval, len, idlen; uint16_t tag; ctl_init_opts(&opts, req->num_args, req->kern_args); target = ctl_get_opt(&opts, "cfiscsi_target"); alias = ctl_get_opt(&opts, "cfiscsi_target_alias"); tags = ctl_get_opt(&opts, "cfiscsi_portal_group_tag"); if (target == NULL || tags == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); 
ctl_free_opts(&opts); return; } tag = strtol(tags, (char **)NULL, 10); ct = cfiscsi_target_find_or_create(&cfiscsi_softc, target, alias, tag); if (ct == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "failed to create target \"%s\"", target); ctl_free_opts(&opts); return; } if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" for portal group tag %u already exists", target, tag); cfiscsi_target_release(ct); ctl_free_opts(&opts); return; } port = &ct->ct_port; // WAT if (ct->ct_state == CFISCSI_TARGET_STATE_DYING) goto done; port->frontend = &cfiscsi_frontend; port->port_type = CTL_PORT_ISCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; port->port_name = "iscsi"; port->physical_port = tag; port->virtual_port = ct->ct_target_id; port->port_online = cfiscsi_online; port->port_offline = cfiscsi_offline; port->port_info = cfiscsi_info; port->onoff_arg = ct; port->fe_datamove = cfiscsi_datamove; port->fe_done = cfiscsi_done; - - /* XXX KDM what should we report here? */ - /* XXX These should probably be fetched from CTL. */ - port->max_targets = 1; - port->max_target_id = 15; port->targ_port = -1; port->options = opts; STAILQ_INIT(&opts); /* Generate Port ID. */ idlen = strlen(target) + strlen(",t,0x0001") + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->port_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->port_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; snprintf(desc->identifier, idlen, "%s,t,0x%4.4x", target, tag); /* Generate Target ID. 
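* Like the Port ID above, this is a UTF-8 SCSI name string, but it holds just the target name.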
*/ idlen = strlen(target) + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->target_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->target_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; strlcpy(desc->identifier, target, idlen); retval = ctl_port_register(port); if (retval != 0) { ctl_free_opts(&port->options); cfiscsi_target_release(ct); /* The devids were allocated with M_CTL above. */ free(port->port_devid, M_CTL); free(port->target_devid, M_CTL); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "ctl_port_register() failed with error %d", retval); return; } done: ct->ct_state = CFISCSI_TARGET_STATE_ACTIVE; req->status = CTL_LUN_OK; memcpy(req->kern_args[0].kvalue, &port->targ_port, sizeof(port->targ_port)); //XXX } static void cfiscsi_ioctl_port_remove(struct ctl_req *req) { struct cfiscsi_target *ct; const char *target, *tags; ctl_options_t opts; uint16_t tag; ctl_init_opts(&opts, req->num_args, req->kern_args); target = ctl_get_opt(&opts, "cfiscsi_target"); tags = ctl_get_opt(&opts, "cfiscsi_portal_group_tag"); if (target == NULL || tags == NULL) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); return; } tag = strtol(tags, (char **)NULL, 10); ct = cfiscsi_target_find(&cfiscsi_softc, target, tag); if (ct == NULL) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "can't find target \"%s\"", target); return; } if (ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" is already dying", target); return; } ctl_free_opts(&opts); ct->ct_state = CFISCSI_TARGET_STATE_DYING; ctl_port_offline(&ct->ct_port); /* Once for the cfiscsi_target_find() hold, once to drop the creation reference. */ cfiscsi_target_release(ct); cfiscsi_target_release(ct); req->status = CTL_LUN_OK; } static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_iscsi *ci; struct ctl_req *req; if (cmd == CTL_PORT_REQ) { req = (struct ctl_req *)addr; switch (req->reqtype) { case CTL_REQ_CREATE: cfiscsi_ioctl_port_create(req); break; case CTL_REQ_REMOVE: cfiscsi_ioctl_port_remove(req); break; default: req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Unsupported request type %d", req->reqtype); } return (0); } if (cmd != CTL_ISCSI) return (ENOTTY); ci = (struct ctl_iscsi *)addr; switch (ci->type) { case CTL_ISCSI_HANDOFF: cfiscsi_ioctl_handoff(ci); break; case CTL_ISCSI_LIST: cfiscsi_ioctl_list(ci); break; case CTL_ISCSI_LOGOUT: cfiscsi_ioctl_logout(ci); break; case CTL_ISCSI_TERMINATE: cfiscsi_ioctl_terminate(ci); break; case CTL_ISCSI_LIMITS: cfiscsi_ioctl_limits(ci); break; #ifdef ICL_KERNEL_PROXY case CTL_ISCSI_LISTEN: cfiscsi_ioctl_listen(ci); break; case CTL_ISCSI_ACCEPT: cfiscsi_ioctl_accept(ci); break; case CTL_ISCSI_SEND: cfiscsi_ioctl_send(ci); break; case CTL_ISCSI_RECEIVE: cfiscsi_ioctl_receive(ci); break; #else case CTL_ISCSI_LISTEN: case CTL_ISCSI_ACCEPT: case CTL_ISCSI_SEND: case CTL_ISCSI_RECEIVE: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: CTL compiled without ICL_KERNEL_PROXY", __func__); break; #endif /* !ICL_KERNEL_PROXY */ default: ci->status = CTL_ISCSI_ERROR;
snprintf(ci->error_str, sizeof(ci->error_str), "%s: invalid iSCSI request type %d", __func__, ci->type); break; } return (0); } static void cfiscsi_target_hold(struct cfiscsi_target *ct) { refcount_acquire(&ct->ct_refcount); } static void cfiscsi_target_release(struct cfiscsi_target *ct) { struct cfiscsi_softc *softc; softc = ct->ct_softc; mtx_lock(&softc->lock); if (refcount_release(&ct->ct_refcount)) { TAILQ_REMOVE(&softc->targets, ct, ct_next); mtx_unlock(&softc->lock); if (ct->ct_state != CFISCSI_TARGET_STATE_INVALID) { ct->ct_state = CFISCSI_TARGET_STATE_INVALID; if (ctl_port_deregister(&ct->ct_port) != 0) printf("%s: ctl_port_deregister() failed\n", __func__); } free(ct, M_CFISCSI); return; } mtx_unlock(&softc->lock); } static struct cfiscsi_target * cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag) { struct cfiscsi_target *ct; mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); return (ct); } mtx_unlock(&softc->lock); return (NULL); } static struct cfiscsi_target * cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag) { struct cfiscsi_target *ct, *newct; if (name[0] == '\0' || strlen(name) >= CTL_ISCSI_NAME_LEN) return (NULL); newct = malloc(sizeof(*newct), M_CFISCSI, M_WAITOK | M_ZERO); mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state == CFISCSI_TARGET_STATE_INVALID) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); free(newct, M_CFISCSI); return (ct); } strlcpy(newct->ct_name, name, sizeof(newct->ct_name)); if (alias != NULL) strlcpy(newct->ct_alias, alias, sizeof(newct->ct_alias)); newct->ct_tag = tag; refcount_init(&newct->ct_refcount, 1); newct->ct_softc = softc; if (TAILQ_EMPTY(&softc->targets)) softc->last_target_id = 0; newct->ct_target_id = ++softc->last_target_id; TAILQ_INSERT_TAIL(&softc->targets, newct, ct_next); mtx_unlock(&softc->lock); return (newct); } static void cfiscsi_datamove_in(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_data_in *bhsdi; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t len, expected_len, sg_len, buffer_offset; const char *sg_addr; int ctl_sg_count, error, i; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } /* * This is the total amount of data to be transferred within the current * SCSI command. We need to record it so that we can properly report * underflow/overflow. */ PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len; /* * This is the offset within the current SCSI command; for the first * call to cfiscsi_datamove() it will be 0, and for subsequent ones * it will be the sum of lengths of previous ones.
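* For example, with consecutive 65536-byte moves the offsets would be 0, 65536, 131072 and so on.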
*/ buffer_offset = io->scsiio.kern_rel_offset; /* * This is the transfer length expected by the initiator. In theory, * it could be different from the correct amount of data from the SCSI * point of view, even if that doesn't make any sense. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); #if 0 if (expected_len != io->scsiio.kern_total_len) { CFISCSI_SESSION_DEBUG(cs, "expected transfer length %zd, " "actual length %zd", expected_len, (size_t)io->scsiio.kern_total_len); } #endif if (buffer_offset >= expected_len) { #if 0 CFISCSI_SESSION_DEBUG(cs, "buffer_offset = %zd, " "already sent the expected len", buffer_offset); #endif io->scsiio.be_move_done(io); return; } i = 0; sg_addr = NULL; sg_len = 0; response = NULL; bhsdi = NULL; for (;;) { if (response == NULL) { response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs; bhsdi->bhsdi_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_IN; bhsdi->bhsdi_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsdi->bhsdi_target_transfer_tag = 0xffffffff; bhsdi->bhsdi_datasn = htonl(PDU_EXPDATASN(request)); PDU_EXPDATASN(request)++; bhsdi->bhsdi_buffer_offset = htonl(buffer_offset); } KASSERT(i < ctl_sg_count, ("i >= ctl_sg_count")); if (sg_len == 0) { sg_addr = ctl_sglist[i].addr; sg_len = ctl_sglist[i].len; KASSERT(sg_len > 0, ("sg_len <= 0")); } len = sg_len; /* * Truncate to maximum data segment length. */ KASSERT(response->ip_data_len < cs->cs_max_send_data_segment_length, ("ip_data_len %zd >= max_send_data_segment_length %d", response->ip_data_len, cs->cs_max_send_data_segment_length)); if (response->ip_data_len + len > cs->cs_max_send_data_segment_length) { len = cs->cs_max_send_data_segment_length - response->ip_data_len; KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } /* * Truncate to expected data transfer length. */ KASSERT(buffer_offset + response->ip_data_len < expected_len, ("buffer_offset %zd + ip_data_len %zd >= expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len + len > expected_len) { CFISCSI_SESSION_DEBUG(cs, "truncating from %zd " "to expected data transfer length %zd", buffer_offset + response->ip_data_len + len, expected_len); len = expected_len - (buffer_offset + response->ip_data_len); KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } error = icl_pdu_append_data(response, sg_addr, len, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); icl_pdu_free(response); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } sg_addr += len; sg_len -= len; io->scsiio.kern_data_resid -= len; KASSERT(buffer_offset + response->ip_data_len <= expected_len, ("buffer_offset %zd + ip_data_len %zd > expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len == expected_len) { /* * Already have the amount of data the initiator wanted. */ break; } if (sg_len == 0) { /* * End of scatter-gather segment; * proceed to the next one... */ if (i == ctl_sg_count - 1) { /* * ... unless this was the last one. */ break; } i++; } if (response->ip_data_len == cs->cs_max_send_data_segment_length) { /* * Can't stuff more data into the current PDU; * queue it. 
Note that's not enough to check * for kern_data_resid == 0 instead; there * may be several Data-In PDUs for the final * call to cfiscsi_datamove(), and we want * to set the F flag only on the last of them. */ buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { buffer_offset -= response->ip_data_len; break; } cfiscsi_pdu_queue(response); response = NULL; bhsdi = NULL; } } if (response != NULL) { buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_F; if (io->io_hdr.status == CTL_SUCCESS) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_S; if (PDU_TOTAL_TRANSFER_LEN(request) < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhsdi->bhsdi_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - PDU_TOTAL_TRANSFER_LEN(request)); } else if (PDU_TOTAL_TRANSFER_LEN(request) > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhsdi->bhsdi_residual_count = htonl(PDU_TOTAL_TRANSFER_LEN(request) - ntohl(bhssc->bhssc_expected_data_transfer_length)); } bhsdi->bhsdi_status = io->scsiio.scsi_status; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; } } KASSERT(response->ip_data_len > 0, ("sending empty Data-In")); cfiscsi_pdu_queue(response); } io->scsiio.be_move_done(io); } static void cfiscsi_datamove_out(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_r2t *bhsr2t; struct cfiscsi_data_wait *cdw; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; uint32_t expected_len, datamove_len, r2t_off, r2t_len; uint32_t target_transfer_tag; bool done; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); /* * We need to record the total transfer length so that we can properly * report underflow/overflow. */ PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len; /* * Complete write underflow. Not a single byte to read. Return. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); - if (io->scsiio.kern_rel_offset > expected_len) { + if (io->scsiio.kern_rel_offset >= expected_len) { io->scsiio.be_move_done(io); return; } datamove_len = MIN(io->scsiio.kern_data_len, expected_len - io->scsiio.kern_rel_offset); target_transfer_tag = atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1); cdw = cfiscsi_data_wait_new(cs, io, bhssc->bhssc_initiator_task_tag, &target_transfer_tag); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } #if 0 CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator " "task tag 0x%x, target transfer tag 0x%x", bhssc->bhssc_initiator_task_tag, target_transfer_tag); #endif cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = target_transfer_tag; cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag; cdw->cdw_r2t_end = datamove_len; cdw->cdw_datasn = 0; /* Set initial data pointer for the CDW respecting ext_data_filled.
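* The loop below walks the S/G list, skipping the bytes already received as immediate data.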
*/ if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = datamove_len; } cdw->cdw_sg_index = 0; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; r2t_off = io->scsiio.ext_data_filled; while (r2t_off > 0) { if (r2t_off >= cdw->cdw_sg_len) { r2t_off -= cdw->cdw_sg_len; cdw->cdw_sg_index++; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; continue; } cdw->cdw_sg_addr += r2t_off; cdw->cdw_sg_len -= r2t_off; r2t_off = 0; } if (cs->cs_immediate_data && io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled < icl_pdu_data_segment_length(request)) { done = cfiscsi_handle_data_segment(request, cdw); if (done) { cfiscsi_data_wait_free(cs, cdw); io->scsiio.be_move_done(io); return; } } r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled; r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled, cs->cs_max_burst_length); cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len; CFISCSI_SESSION_LOCK(cs); TAILQ_INSERT_TAIL(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * XXX: We should limit the number of outstanding R2T PDUs * per task to MaxOutstandingR2T. */ response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs; bhsr2t->bhsr2t_opcode = ISCSI_BHS_OPCODE_R2T; bhsr2t->bhsr2t_flags = 0x80; bhsr2t->bhsr2t_lun = bhssc->bhssc_lun; bhsr2t->bhsr2t_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsr2t->bhsr2t_target_transfer_tag = target_transfer_tag; /* * XXX: Here we assume that cfiscsi_datamove() won't ever * be running concurrently on several CPUs for a given * command. */ bhsr2t->bhsr2t_r2tsn = htonl(PDU_R2TSN(request)); PDU_R2TSN(request)++; /* * This is the offset within the current SCSI command; * i.e. for the first call of datamove(), it will be 0, * and for subsequent ones it will be the sum of lengths * of previous ones. * * The ext_data_filled is to account for unsolicited * (immediate) data that might have already arrived. */ bhsr2t->bhsr2t_buffer_offset = htonl(r2t_off); /* * This is the total length (sum of S/G lengths) this call * to cfiscsi_datamove() is supposed to handle, limited by * MaxBurstLength. */ bhsr2t->bhsr2t_desired_data_transfer_length = htonl(r2t_len); cfiscsi_pdu_queue(response); } static void cfiscsi_datamove(union ctl_io *io) { if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) cfiscsi_datamove_in(io); else { /* We hadn't received anything during this datamove yet. 
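* Clear ext_data_filled so cfiscsi_datamove_out() starts from the beginning of this buffer.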
*/ io->scsiio.ext_data_filled = 0; cfiscsi_datamove_out(io); } } static void cfiscsi_scsi_command_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_scsi_response *bhssr; #ifdef DIAGNOSTIC struct cfiscsi_data_wait *cdw; #endif struct cfiscsi_session *cs; uint16_t sense_length; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("replying to wrong opcode 0x%x", bhssc->bhssc_opcode)); //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); #ifdef DIAGNOSTIC CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) KASSERT(bhssc->bhssc_initiator_task_tag != cdw->cdw_initiator_task_tag, ("dangling cdw")); CFISCSI_SESSION_UNLOCK(cs); #endif /* * Do not return status for aborted commands. * There are exceptions, but none supported by CTL yet. */ if (((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) || (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)) { ctl_free_io(io); icl_pdu_free(request); return; } response = cfiscsi_pdu_new_response(request, M_WAITOK); bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; bhssr->bhssr_opcode = ISCSI_BHS_OPCODE_SCSI_RESPONSE; bhssr->bhssr_flags = 0x80; /* * XXX: We don't deal with bidirectional under/overflows; * does anything actually support those? */ if (PDU_TOTAL_TRANSFER_LEN(request) < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhssr->bhssr_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - PDU_TOTAL_TRANSFER_LEN(request)); //CFISCSI_SESSION_DEBUG(cs, "underflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } else if (PDU_TOTAL_TRANSFER_LEN(request) > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhssr->bhssr_residual_count = htonl(PDU_TOTAL_TRANSFER_LEN(request) - ntohl(bhssc->bhssc_expected_data_transfer_length)); //CFISCSI_SESSION_DEBUG(cs, "overflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } bhssr->bhssr_response = BHSSR_RESPONSE_COMMAND_COMPLETED; bhssr->bhssr_status = io->scsiio.scsi_status; bhssr->bhssr_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhssr->bhssr_expdatasn = htonl(PDU_EXPDATASN(request)); if (io->scsiio.sense_len > 0) { #if 0 CFISCSI_SESSION_DEBUG(cs, "returning %d bytes of sense data", io->scsiio.sense_len); #endif sense_length = htons(io->scsiio.sense_len); icl_pdu_append_data(response, &sense_length, sizeof(sense_length), M_WAITOK); icl_pdu_append_data(response, &io->scsiio.sense_data, io->scsiio.sense_len, M_WAITOK); } ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_task_management_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct cfiscsi_data_wait *cdw, *tmpcdw; struct cfiscsi_session *cs, *tcs; struct cfiscsi_softc *softc; int cold_reset = 0; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; KASSERT((bhstmr->bhstmr_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_TASK_REQUEST, ("replying to wrong opcode 0x%x", 
bhstmr->bhstmr_opcode)); #if 0 CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x; referenced task tag 0x%x", bhstmr->bhstmr_initiator_task_tag, bhstmr->bhstmr_referenced_task_tag); #endif if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_ABORT_TASK) { /* * Make sure we no longer wait for Data-Out for this command. */ CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH_SAFE(cdw, &cs->cs_waiting_for_data_out, cdw_next, tmpcdw) { if (bhstmr->bhstmr_referenced_task_tag != cdw->cdw_initiator_task_tag) continue; #if 0 CFISCSI_SESSION_DEBUG(cs, "removing csw for initiator task " "tag 0x%x", bhstmr->bhstmr_initiator_task_tag); #endif TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 43; cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); cfiscsi_data_wait_free(cs, cdw); } CFISCSI_SESSION_UNLOCK(cs); } if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_TARGET_COLD_RESET && io->io_hdr.status == CTL_SUCCESS) cold_reset = 1; response = cfiscsi_pdu_new_response(request, M_WAITOK); bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_SUCCEEDED; break; case CTL_TASK_LUN_DOES_NOT_EXIST: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_LUN_DOES_NOT_EXIST; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: default: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; break; } memcpy(bhstmr2->bhstmr_additional_reponse_information, io->taskio.task_resp, sizeof(io->taskio.task_resp)); bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); if (cold_reset) { softc = cs->cs_target->ct_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(tcs, &softc->sessions, cs_next) { if (tcs->cs_target == cs->cs_target) cfiscsi_session_terminate(tcs); } mtx_unlock(&softc->lock); } } static void cfiscsi_done(union ctl_io *io) { struct icl_pdu *request; struct cfiscsi_session *cs; KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); if (io->io_hdr.io_type == CTL_IO_TASK && io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) { /* * Implicit task termination has just completed; nothing to do. 
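* Just mark the session and wake up the thread sleeping in cfiscsi_session_terminate_tasks().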
*/ cs = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs->cs_tasks_aborted = true; refcount_release(&cs->cs_outstanding_ctl_pdus); wakeup(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus)); ctl_free_io(io); return; } request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_scsi_command_done(io); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_task_management_done(io); break; default: panic("cfiscsi_done called with wrong opcode 0x%x", request->ip_bhs->bhs_opcode); } refcount_release(&cs->cs_outstanding_ctl_pdus); } Index: projects/clang400-import/sys/cam/ctl/ctl_tpc_local.c =================================================================== --- projects/clang400-import/sys/cam/ctl/ctl_tpc_local.c (revision 312719) +++ projects/clang400-import/sys/cam/ctl/ctl_tpc_local.c (revision 312720) @@ -1,332 +1,330 @@ /*- * Copyright (c) 2014 Alexander Motin * Copyright (c) 2004, 2005 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct tpcl_softc { struct ctl_port port; int cur_tag_num; }; static struct tpcl_softc tpcl_softc; static int tpcl_init(void); static int tpcl_shutdown(void); static void tpcl_datamove(union ctl_io *io); static void tpcl_done(union ctl_io *io); static struct ctl_frontend tpcl_frontend = { .name = "tpc", .init = tpcl_init, .shutdown = tpcl_shutdown, }; CTL_FRONTEND_DECLARE(ctltpc, tpcl_frontend); static int tpcl_init(void) { struct tpcl_softc *tsoftc = &tpcl_softc; struct ctl_port *port; struct scsi_transportid_spi *tid; int error, len; memset(tsoftc, 0, sizeof(*tsoftc)); port = &tsoftc->port; port->frontend = &tpcl_frontend; port->port_type = CTL_PORT_INTERNAL; port->num_requested_ctl_io = 100; port->port_name = "tpc"; port->fe_datamove = tpcl_datamove; port->fe_done = tpcl_done; - port->max_targets = 1; - port->max_target_id = 0; port->targ_port = -1; port->max_initiators = 1; if ((error = ctl_port_register(port)) != 0) { printf("%s: tpc port registration failed\n", __func__); return (error); } len = sizeof(struct scsi_transportid_spi); port->init_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->init_devid->len = len; tid = (struct scsi_transportid_spi *)port->init_devid->data; tid->format_protocol = SCSI_TRN_SPI_FORMAT_DEFAULT | SCSI_PROTO_SPI; scsi_ulto2b(0, tid->scsi_addr); scsi_ulto2b(port->targ_port, tid->rel_trgt_port_id); ctl_port_online(port); return (0); } static int tpcl_shutdown(void) { struct tpcl_softc *tsoftc = &tpcl_softc; struct ctl_port *port = &tsoftc->port; int error; ctl_port_offline(port); if ((error = ctl_port_deregister(port)) != 0) printf("%s: tpc port deregistration failed\n", __func__); return (error); } static void tpcl_datamove(union ctl_io *io) { struct ctl_sg_entry *ext_sglist, *kern_sglist; struct ctl_sg_entry ext_entry, kern_entry; int ext_sg_entries, kern_sg_entries; int ext_sg_start, ext_offset; int len_to_copy; int kern_watermark, ext_watermark; struct ctl_scsiio *ctsio; int i, j; CTL_DEBUG_PRINT(("%s\n", __func__)); ctsio = &io->scsiio; /* * If this is the case, we're probably doing a BBR read and don't * actually need to transfer the data. This will effectively * bit-bucket the data. */ if (ctsio->ext_data_ptr == NULL) goto bailout; /* * To simplify things here, if we have a single buffer, stick it in * a S/G entry and just make it a single entry S/G list. 
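* The copy loop below then only has to handle the S/G list case.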
*/ if (ctsio->ext_sg_entries > 0) { int len_seen; ext_sglist = (struct ctl_sg_entry *)ctsio->ext_data_ptr; ext_sg_entries = ctsio->ext_sg_entries; ext_sg_start = 0; ext_offset = 0; len_seen = 0; for (i = 0; i < ext_sg_entries; i++) { if ((len_seen + ext_sglist[i].len) >= ctsio->ext_data_filled) { ext_sg_start = i; ext_offset = ctsio->ext_data_filled - len_seen; break; } len_seen += ext_sglist[i].len; } } else { ext_sglist = &ext_entry; ext_sglist->addr = ctsio->ext_data_ptr; ext_sglist->len = ctsio->ext_data_len; ext_sg_entries = 1; ext_sg_start = 0; ext_offset = ctsio->ext_data_filled; } if (ctsio->kern_sg_entries > 0) { kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; kern_sg_entries = ctsio->kern_sg_entries; } else { kern_sglist = &kern_entry; kern_sglist->addr = ctsio->kern_data_ptr; kern_sglist->len = ctsio->kern_data_len; kern_sg_entries = 1; } kern_watermark = 0; ext_watermark = ext_offset; for (i = ext_sg_start, j = 0; i < ext_sg_entries && j < kern_sg_entries;) { uint8_t *ext_ptr, *kern_ptr; len_to_copy = min(ext_sglist[i].len - ext_watermark, kern_sglist[j].len - kern_watermark); ext_ptr = (uint8_t *)ext_sglist[i].addr; ext_ptr = ext_ptr + ext_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else kern_ptr = (uint8_t *)kern_sglist[j].addr; kern_ptr = kern_ptr + kern_watermark; if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, kern_ptr, ext_ptr)); memcpy(ext_ptr, kern_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, ext_ptr, kern_ptr)); memcpy(kern_ptr, ext_ptr, len_to_copy); } ctsio->ext_data_filled += len_to_copy; ctsio->kern_data_resid -= len_to_copy; ext_watermark += len_to_copy; if (ext_sglist[i].len == ext_watermark) { i++; ext_watermark = 0; } kern_watermark += len_to_copy; if (kern_sglist[j].len == kern_watermark) { j++; kern_watermark = 0; } } CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n", __func__, ext_sg_entries, kern_sg_entries)); CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n", __func__, ctsio->ext_data_len, ctsio->kern_data_len)); bailout: io->scsiio.be_move_done(io); } static void tpcl_done(union ctl_io *io) { tpc_done(io); } uint64_t tpcl_resolve(struct ctl_softc *softc, int init_port, struct scsi_ec_cscd *cscd, uint32_t *ss, uint32_t *ps, uint32_t *pso) { struct scsi_ec_cscd_id *cscdid; struct ctl_port *port; struct ctl_lun *lun; uint64_t lunid = UINT64_MAX; if (cscd->type_code != EC_CSCD_ID || (cscd->luidt_pdt & EC_LUIDT_MASK) != EC_LUIDT_LUN || (cscd->luidt_pdt & EC_NUL) != 0) return (lunid); cscdid = (struct scsi_ec_cscd_id *)cscd; mtx_lock(&softc->ctl_lock); if (init_port >= 0) port = softc->ctl_ports[init_port]; else port = NULL; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (port != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; if (lun->lun_devid == NULL) continue; if (scsi_devid_match(lun->lun_devid->data, lun->lun_devid->len, &cscdid->codeset, cscdid->length + 4) == 0) { lunid = lun->lun; if (ss && lun->be_lun) *ss = lun->be_lun->blocksize; if (ps && lun->be_lun) *ps = lun->be_lun->blocksize << lun->be_lun->pblockexp; if (pso && lun->be_lun) *pso = lun->be_lun->blocksize * lun->be_lun->pblockoff; 
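/* Found the LUN this CSCD describes; stop searching. */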
break; } } mtx_unlock(&softc->ctl_lock); return (lunid); }; union ctl_io * tpcl_alloc_io(void) { struct tpcl_softc *tsoftc = &tpcl_softc; return (ctl_alloc_io(tsoftc->port.ctl_pool_ref)); }; int tpcl_queue(union ctl_io *io, uint64_t lun) { struct tpcl_softc *tsoftc = &tpcl_softc; io->io_hdr.nexus.initid = 0; io->io_hdr.nexus.targ_port = tsoftc->port.targ_port; io->io_hdr.nexus.targ_lun = lun; io->scsiio.tag_num = atomic_fetchadd_int(&tsoftc->cur_tag_num, 1); io->scsiio.ext_data_filled = 0; return (ctl_queue(io)); } Index: projects/clang400-import/sys/cam/ctl/scsi_ctl.c =================================================================== --- projects/clang400-import/sys/cam/ctl/scsi_ctl.c (revision 312719) +++ projects/clang400-import/sys/cam/ctl/scsi_ctl.c (revision 312720) @@ -1,2102 +1,2096 @@ /*- * Copyright (c) 2008, 2009 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ */ /* * Peripheral driver interface between CAM and CTL (CAM Target Layer). 
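* It registers a CTL frontend port for each target-capable CAM SIM.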
* * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctlfe_softc { struct ctl_port port; path_id_t path_id; target_id_t target_id; uint32_t hba_misc; u_int maxio; struct cam_sim *sim; char port_name[DEV_IDLEN]; struct mtx lun_softc_mtx; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; struct mtx ctlfe_list_mtx; static char ctlfe_mtx_desc[] = "ctlfelist"; #ifdef CTLFE_INIT_ENABLE static int ctlfe_max_targets = 1; static int ctlfe_num_targets = 0; #endif typedef enum { CTLFE_LUN_NONE = 0x00, CTLFE_LUN_WILDCARD = 0x01 } ctlfe_lun_flags; struct ctlfe_lun_softc { struct ctlfe_softc *parent_softc; struct cam_periph *periph; ctlfe_lun_flags flags; uint64_t ccbs_alloced; uint64_t ccbs_freed; uint64_t ctios_sent; uint64_t ctios_returned; uint64_t atios_alloced; uint64_t atios_freed; uint64_t inots_alloced; uint64_t inots_freed; /* bus_dma_tag_t dma_tag; */ TAILQ_HEAD(, ccb_hdr) work_queue; STAILQ_ENTRY(ctlfe_lun_softc) links; }; typedef enum { CTLFE_CMD_NONE = 0x00, CTLFE_CMD_PIECEWISE = 0x01 } ctlfe_cmd_flags; struct ctlfe_cmd_info { int cur_transfer_index; size_t cur_transfer_off; ctlfe_cmd_flags flags; /* * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 * bytes on amd64. So with 32 elements, this is 256 bytes on * i386 and 512 bytes on amd64. */ #define CTLFE_MAX_SEGS 32 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; }; /* * When we register the adapter/bus, request that this many ctl_ios be * allocated. This should be the maximum supported by the adapter, but we * currently don't have a way to get that back from the path inquiry. * XXX KDM add that to the path inquiry. */ #define CTLFE_REQ_CTL_IO 4096 /* * Number of Accept Target I/O CCBs to allocate and queue down to the * adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_ATIO_PER_LUN 1024 /* * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to * allocate and queue down to the adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_IN_PER_LUN 1024 /* * Timeout (in seconds) on CTIO CCB allocation for doing a DMA or sending * status to the initiator. The SIM is expected to have its own timeouts, * so we're not putting this timeout around the CCB execution time. The * SIM should timeout and let us know if it has an issue. */ #define CTLFE_DMA_TIMEOUT 60 /* * Turn this on to enable extra debugging prints. */ #if 0 #define CTLFE_DEBUG #endif /* * Use randomly assigned WWNN/WWPN values. This is to work around an issue * in the FreeBSD initiator that makes it unable to rescan the target if * the target gets rebooted and the WWNN/WWPN stay the same. 
*/
*/ #if 0 #define RANDOM_WWNN #endif MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); #define io_ptr ppriv_ptr0 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 #define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0]) #define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1]) static int ctlfeinitialize(void); static int ctlfeshutdown(void); static periph_init_t ctlfeperiphinit; static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static periph_ctor_t ctlferegister; static periph_oninv_t ctlfeoninvalidate; static periph_dtor_t ctlfecleanup; static periph_start_t ctlfestart; static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb); static void ctlfe_onoffline(void *arg, int online); static void ctlfe_online(void *arg); static void ctlfe_offline(void *arg); static int ctlfe_lun_enable(void *arg, int lun_id); static int ctlfe_lun_disable(void *arg, int lun_id); static void ctlfe_dump_sim(struct cam_sim *sim); static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); static void ctlfe_datamove(union ctl_io *io); static void ctlfe_done(union ctl_io *io); static void ctlfe_dump(void); static struct periph_driver ctlfe_driver = { ctlfeperiphinit, "ctl", TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, CAM_PERIPH_DRV_EARLY }; static struct ctl_frontend ctlfe_frontend = { .name = "camtgt", .init = ctlfeinitialize, .fe_dump = ctlfe_dump, .shutdown = ctlfeshutdown, }; CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); static int ctlfeshutdown(void) { /* CAM does not support periph driver unregister now. */ return (EBUSY); } static int ctlfeinitialize(void) { STAILQ_INIT(&ctlfe_softc_list); mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); periphdriver_register(&ctlfe_driver); return (0); } static void ctlfeperiphinit(void) { cam_status status; status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | AC_CONTRACT, ctlfeasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ctl: Failed to attach async callback due to CAM " "status 0x%x!\n", status); } } static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct ctlfe_softc *softc; #ifdef CTLFEDEBUG printf("%s: entered\n", __func__); #endif mtx_lock(&ctlfe_list_mtx); STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { if (softc->path_id == xpt_path_path_id(path)) break; } mtx_unlock(&ctlfe_list_mtx); /* * When a new path gets registered, and it is capable of target * mode, go ahead and attach. Later on, we may need to be more * selective, but for now this will be sufficient. 
*/ switch (code) { case AC_PATH_REGISTERED: { struct ctl_port *port; struct ccb_pathinq *cpi; int retval; cpi = (struct ccb_pathinq *)arg; /* Don't attach if it doesn't support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { #ifdef CTLFEDEBUG printf("%s: SIM %s%d doesn't support target mode\n", __func__, cpi->dev_name, cpi->unit_number); #endif break; } if (softc != NULL) { #ifdef CTLFEDEBUG printf("%s: CTL port for CAM path %u already exists\n", __func__, xpt_path_path_id(path)); #endif break; } #ifdef CTLFE_INIT_ENABLE if (ctlfe_num_targets >= ctlfe_max_targets) { union ccb *ccb; ccb = (union ccb *)malloc(sizeof(*ccb), M_TEMP, M_NOWAIT | M_ZERO); if (ccb == NULL) { printf("%s: unable to malloc CCB!\n", __func__); return; } xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; ccb->knob.xport_specific.fc.role = KNOB_ROLE_INITIATOR; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: SIM %s%d (path id %d) initiator " "enable failed with status %#x\n", __func__, cpi->dev_name, cpi->unit_number, cpi->ccb_h.path_id, ccb->ccb_h.status); } else { printf("%s: SIM %s%d (path id %d) initiator " "enable succeeded\n", __func__, cpi->dev_name, cpi->unit_number, cpi->ccb_h.path_id); } free(ccb, M_TEMP); break; } else { ctlfe_num_targets++; } printf("%s: ctlfe_num_targets = %d\n", __func__, ctlfe_num_targets); #endif /* CTLFE_INIT_ENABLE */ /* * We're in an interrupt context here, so we have to * use M_NOWAIT. Of course this means trouble if we * can't allocate memory. */ softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("%s: unable to malloc %zd bytes for softc\n", __func__, sizeof(*softc)); return; } softc->path_id = cpi->ccb_h.path_id; softc->target_id = cpi->initiator_id; softc->sim = xpt_path_sim(path); softc->hba_misc = cpi->hba_misc; if (cpi->maxio != 0) softc->maxio = cpi->maxio; else softc->maxio = DFLTPHYS; mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); STAILQ_INIT(&softc->lun_softc_list); port = &softc->port; port->frontend = &ctlfe_frontend; /* * XXX KDM should we be more accurate here ? */ if (cpi->transport == XPORT_FC) port->port_type = CTL_PORT_FC; else if (cpi->transport == XPORT_SAS) port->port_type = CTL_PORT_SAS; else port->port_type = CTL_PORT_SCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; snprintf(softc->port_name, sizeof(softc->port_name), "%s%d", cpi->dev_name, cpi->unit_number); /* * XXX KDM it would be nice to allocate storage in the * frontend structure itself. */ port->port_name = softc->port_name; port->physical_port = cpi->bus_id; port->virtual_port = 0; port->port_online = ctlfe_online; port->port_offline = ctlfe_offline; port->onoff_arg = softc; port->lun_enable = ctlfe_lun_enable; port->lun_disable = ctlfe_lun_disable; port->targ_lun_arg = softc; port->fe_datamove = ctlfe_datamove; port->fe_done = ctlfe_done; - /* - * XXX KDM the path inquiry doesn't give us the maximum - * number of targets supported. - */ - port->max_targets = cpi->max_target; - port->max_target_id = cpi->max_target; port->targ_port = -1; /* * XXX KDM need to figure out whether we're the master or * slave. 
*/ #ifdef CTLFEDEBUG printf("%s: calling ctl_port_register() for %s%d\n", __func__, cpi->dev_name, cpi->unit_number); #endif retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with " "error %d!\n", __func__, retval); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); break; } else { mtx_lock(&ctlfe_list_mtx); STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); mtx_unlock(&ctlfe_list_mtx); } break; } case AC_PATH_DEREGISTERED: { if (softc != NULL) { /* * XXX KDM are we certain at this point that there * are no outstanding commands for this frontend? */ mtx_lock(&ctlfe_list_mtx); STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, links); mtx_unlock(&ctlfe_list_mtx); ctl_port_deregister(&softc->port); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); } break; } case AC_CONTRACT: { struct ac_contract *ac; ac = (struct ac_contract *)arg; switch (ac->contract_number) { case AC_CONTRACT_DEV_CHG: { struct ac_device_changed *dev_chg; int retval; dev_chg = (struct ac_device_changed *)ac->contract_data; printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", __func__, dev_chg->wwpn, dev_chg->port, xpt_path_path_id(path), dev_chg->target, (dev_chg->arrived == 0) ? "left" : "arrived"); if (softc == NULL) { printf("%s: CTL port for CAM path %u not " "found!\n", __func__, xpt_path_path_id(path)); break; } if (dev_chg->arrived != 0) { retval = ctl_add_initiator(&softc->port, dev_chg->target, dev_chg->wwpn, NULL); } else { retval = ctl_remove_initiator(&softc->port, dev_chg->target); } if (retval < 0) { printf("%s: could not %s port %d iid %u " "WWPN %#jx!\n", __func__, (dev_chg->arrived != 0) ? "add" : "remove", softc->port.targ_port, dev_chg->target, (uintmax_t)dev_chg->wwpn); } break; } default: printf("%s: unsupported contract number %ju\n", __func__, (uintmax_t)ac->contract_number); break; } break; } default: break; } } static cam_status ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; union ccb en_lun_ccb; cam_status status; int i; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; TAILQ_INIT(&softc->work_queue); softc->periph = periph; periph->softc = softc; xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; en_lun_ccb.cel.grp6_len = 0; en_lun_ccb.cel.grp7_len = 0; en_lun_ccb.cel.enable = 1; xpt_action(&en_lun_ccb); status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", __func__, en_lun_ccb.ccb_h.status); return (status); } status = CAM_REQ_CMP; for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; struct ctlfe_cmd_info *cmd_info; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, M_ZERO | M_NOWAIT); if (cmd_info == NULL) { ctl_free_io(new_io); free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } PRIV_INFO(new_io) = cmd_info; softc->atios_alloced++; new_ccb->ccb_h.io_ptr = new_io; xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = 
new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { free(cmd_info, M_CTLFE); ctl_free_io(new_io); free(new_ccb, M_CTLFE); break; } } status = cam_periph_acquire(periph); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: could not acquire reference " "count, status = %#x\n", __func__, status); return (status); } if (i == 0) { xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " "status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } softc->inots_alloced++; new_ccb->ccb_h.io_ptr = new_io; xpt_setup_ccb(&new_ccb->ccb_h, periph->path, /*priority*/ 1); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { /* * Note that we don't free the CCB here. If the * status is not CAM_REQ_INPROG, then we're * probably talking to a SIM that says it is * target-capable but doesn't support the * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the * older API. In that case, it'll call xpt_done() * on the CCB, and we need to free it in our done * routine as a result. */ break; } } if ((i == 0) || (status != CAM_REQ_INPROG)) { xpt_print(periph->path, "%s: could not allocate immediate " "notify CCBs, status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); return (CAM_REQ_CMP); } static void ctlfeoninvalidate(struct cam_periph *periph) { union ccb en_lun_ccb; cam_status status; struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; xpt_setup_ccb(&en_lun_ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); en_lun_ccb.ccb_h.func_code = XPT_EN_LUN; en_lun_ccb.cel.grp6_len = 0; en_lun_ccb.cel.grp7_len = 0; en_lun_ccb.cel.enable = 0; xpt_action(&en_lun_ccb); status = (en_lun_ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", __func__, en_lun_ccb.ccb_h.status); /* * XXX KDM what do we do now? 
*/ } bus_softc = softc->parent_softc; mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); } static void ctlfecleanup(struct cam_periph *periph) { struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; KASSERT(softc->ccbs_freed == softc->ccbs_alloced, ("%s: " "ccbs_freed %ju != ccbs_alloced %ju", __func__, softc->ccbs_freed, softc->ccbs_alloced)); KASSERT(softc->ctios_returned == softc->ctios_sent, ("%s: " "ctios_returned %ju != ctios_sent %ju", __func__, softc->ctios_returned, softc->ctios_sent)); KASSERT(softc->atios_freed == softc->atios_alloced, ("%s: " "atios_freed %ju != atios_alloced %ju", __func__, softc->atios_freed, softc->atios_alloced)); KASSERT(softc->inots_freed == softc->inots_alloced, ("%s: " "inots_freed %ju != inots_alloced %ju", __func__, softc->inots_freed, softc->inots_alloced)); free(softc, M_CTLFE); } static void ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, u_int16_t *sglist_cnt) { struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ctl_sg_entry *ctl_sglist; bus_dma_segment_t *cam_sglist; size_t off; int i, idx; cmd_info = PRIV_INFO(io); bus_softc = softc->parent_softc; /* * Set the direction, relative to the initiator. */ *flags &= ~CAM_DIR_MASK; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) *flags |= CAM_DIR_IN; else *flags |= CAM_DIR_OUT; *flags &= ~CAM_DATA_MASK; idx = cmd_info->cur_transfer_index; off = cmd_info->cur_transfer_off; cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. */ /* One time shift for SRR offset. */ off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; *data_ptr = io->scsiio.kern_data_ptr + off; if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { *dxfer_len = io->scsiio.kern_data_len - off; } else { *dxfer_len = bus_softc->maxio; cmd_info->cur_transfer_off += bus_softc->maxio; cmd_info->flags |= CTLFE_CMD_PIECEWISE; } *sglist_cnt = 0; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_PADDR; else *flags |= CAM_DATA_VADDR; } else { /* S/G list with physical or virtual pointers. */ ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; /* One time shift for SRR offset. 
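* For illustration (made-up numbers): if a previous pass left * ext_data_filled at 6144 and each S/G entry is 4096 bytes long, the loop * below consumes the first entry entirely and the transfer resumes 2048 * bytes into the second entry.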
*/ while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) { io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off; idx++; off = 0; } off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; cam_sglist = cmd_info->cam_sglist; *dxfer_len = 0; for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { cam_sglist[i].ds_addr = (bus_addr_t)ctl_sglist[i + idx].addr + off; if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; *dxfer_len += cam_sglist[i].ds_len; } else { cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; cmd_info->cur_transfer_index = idx + i; cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; cmd_info->flags |= CTLFE_CMD_PIECEWISE; *dxfer_len += cam_sglist[i].ds_len; if (ctl_sglist[i].len != 0) i++; break; } if (i == (CTLFE_MAX_SEGS - 1) && idx + i < (io->scsiio.kern_sg_entries - 1)) { cmd_info->cur_transfer_index = idx + i + 1; cmd_info->cur_transfer_off = 0; cmd_info->flags |= CTLFE_CMD_PIECEWISE; i++; break; } off = 0; } *sglist_cnt = i; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_SG_PADDR; else *flags |= CAM_DATA_SG; *data_ptr = (uint8_t *)cam_sglist; } } static void ctlfestart(struct cam_periph *periph, union ccb *start_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_cmd_info *cmd_info; struct ccb_hdr *ccb_h; struct ccb_accept_tio *atio; struct ccb_scsiio *csio; uint8_t *data_ptr; uint32_t dxfer_len; ccb_flags flags; union ctl_io *io; uint8_t scsi_status; softc = (struct ctlfe_lun_softc *)periph->softc; softc->ccbs_alloced++; ccb_h = TAILQ_FIRST(&softc->work_queue); if (ccb_h == NULL) { softc->ccbs_freed++; xpt_release_ccb(start_ccb); return; } /* Take the ATIO off the work queue */ TAILQ_REMOVE(&softc->work_queue, ccb_h, periph_links.tqe); atio = (struct ccb_accept_tio *)ccb_h; io = (union ctl_io *)ccb_h->io_ptr; csio = &start_ccb->csio; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); cmd_info = PRIV_INFO(io); cmd_info->cur_transfer_index = 0; cmd_info->cur_transfer_off = 0; cmd_info->flags = 0; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { /* * Datamove call, we need to setup the S/G list. */ ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); } else { /* * We're done, send status back. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; /* * If this command was aborted, we don't * need to send status back to the SIM. * Just free the CTIO and ctl_io, and * recycle the ATIO back to the SIM. */ xpt_print(periph->path, "%s: aborted " "command 0x%04x discarded\n", __func__, io->scsiio.tag_num); /* * For a wildcard attachment, commands can * come in with a specific target/lun. Reset * the target and LUN fields back to the * wildcard values before we send them back * down to the SIM. The SIM has a wildcard * LUN enabled, not whatever target/lun * these happened to be. */ if (softc->flags & CTLFE_LUN_WILDCARD) { atio->ccb_h.target_id = CAM_TARGET_WILDCARD; atio->ccb_h.target_lun = CAM_LUN_WILDCARD; } if (atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) { xpt_print(periph->path, "%s: func_code " "is %#x\n", __func__, atio->ccb_h.func_code); } start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; /* Tell the SIM that we've aborted this ATIO */ xpt_action(start_ccb); softc->ccbs_freed++; xpt_release_ccb(start_ccb); /* * Send the ATIO back down to the SIM. 
*/ xpt_action((union ccb *)atio); /* * If we still have work to do, ask for * another CCB. Otherwise, deactivate our * callout. */ if (!TAILQ_EMPTY(&softc->work_queue)) xpt_schedule(periph, /*priority*/ 1); return; } data_ptr = NULL; dxfer_len = 0; csio->sglist_cnt = 0; } scsi_status = 0; if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || io->io_hdr.status == CTL_SUCCESS)) { flags |= CAM_SEND_STATUS; scsi_status = io->scsiio.scsi_status; csio->sense_len = io->scsiio.sense_len; #ifdef CTLFEDEBUG printf("%s: tag %04x status %x\n", __func__, atio->tag_id, io->io_hdr.status); #endif if (csio->sense_len != 0) { csio->sense_data = io->scsiio.sense_data; flags |= CAM_SEND_SENSE; } else if (scsi_status == SCSI_STATUS_CHECK_COND) { xpt_print(periph->path, "%s: check condition " "with no sense\n", __func__); } } #ifdef CTLFEDEBUG printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, (flags & CAM_SEND_STATUS) ? "done" : "datamove", atio->tag_id, flags, data_ptr, dxfer_len); #endif /* * Valid combinations: * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, * sglist_cnt != 0 */ #ifdef CTLFEDEBUG if (((flags & CAM_SEND_STATUS) && (((flags & CAM_DATA_SG) != 0) || (dxfer_len != 0) || (csio->sglist_cnt != 0))) || (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) || ((flags & CAM_DATA_SG) && (csio->sglist_cnt == 0)) || (((flags & CAM_DATA_SG) == 0) && (csio->sglist_cnt != 0))) { printf("%s: tag %04x cdb %02x flags %#x dxfer_len " "%d sg %u\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0], flags, dxfer_len, csio->sglist_cnt); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } #endif cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, scsi_status, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ 5 * 1000); start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.ccb_atio = atio; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); softc->ctios_sent++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); /* * If we still have work to do, ask for another CCB. */ if (!TAILQ_EMPTY(&softc->work_queue)) xpt_schedule(periph, /*priority*/ 1); } static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) { struct ctlfe_lun_softc *softc; union ctl_io *io; struct ctlfe_cmd_info *cmd_info; softc = (struct ctlfe_lun_softc *)periph->softc; io = ccb->ccb_h.io_ptr; switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: softc->atios_freed++; cmd_info = PRIV_INFO(io); free(cmd_info, M_CTLFE); break; case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: softc->inots_freed++; break; default: break; } ctl_free_io(io); free(ccb, M_CTLFE); KASSERT(softc->atios_freed <= softc->atios_alloced, ("%s: " "atios_freed %ju > atios_alloced %ju", __func__, softc->atios_freed, softc->atios_alloced)); KASSERT(softc->inots_freed <= softc->inots_alloced, ("%s: " "inots_freed %ju > inots_alloced %ju", __func__, softc->inots_freed, softc->inots_alloced)); /* * If we have received all of our CCBs, we can release our * reference on the peripheral driver. It will probably go away * now. 
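* (This drops the reference taken with cam_periph_acquire() in * ctlferegister().)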
*/ if ((softc->atios_freed == softc->atios_alloced) && (softc->inots_freed == softc->inots_alloced)) { cam_periph_release_locked(periph); } } static int ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset) { uint64_t lba; uint32_t num_blocks, nbc; uint8_t *cmdbyt = atio_cdb_ptr(atio); nbc = offset >> 9; /* ASSUMING 512 BYTE BLOCKS */ switch (cmdbyt[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt; lba = scsi_3btoul(cdb->addr); lba &= 0x1fffff; num_blocks = cdb->length; if (num_blocks == 0) num_blocks = 256; lba += nbc; num_blocks -= nbc; scsi_ulto3b(lba, cdb->addr); cdb->length = num_blocks; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto2b(num_blocks, cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_ulto4b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); lba += nbc; num_blocks -= nbc; scsi_u64to8b(lba, cdb->addr); scsi_ulto4b(num_blocks, cdb->length); break; } default: return -1; } return (0); } static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ccb_accept_tio *atio = NULL; union ctl_io *io = NULL; struct mtx *mtx; KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0, ("CCB in ctlfedone() without CAM_UNLOCKED flag")); #ifdef CTLFE_DEBUG printf("%s: entered, func_code = %#x\n", __func__, done_ccb->ccb_h.func_code); #endif /* * At this point CTL has no known use case for device queue freezes. * In case some SIM thinks differently -- drop its freeze right here. */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } softc = (struct ctlfe_lun_softc *)periph->softc; bus_softc = softc->parent_softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); /* * If the peripheral is invalid, ATIOs and immediate notify CCBs * need to be freed. Most of the ATIOs and INOTs that come back * will be CCBs that are being returned from the SIM as a result of * our disabling the LUN. * * Other CCB types are handled in their respective cases below. */ if (periph->flags & CAM_PERIPH_INVALID) { switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: ctlfe_free_ccb(periph, done_ccb); goto out; default: break; } } switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { atio = &done_ccb->atio; resubmit: /* * Allocate a ctl_io, pass it to CTL, and wait for the * datamove or done. */ mtx_unlock(mtx); io = done_ccb->ccb_h.io_ptr; cmd_info = PRIV_INFO(io); ctl_zero_io(io); /* Save pointers on both sides */ PRIV_CCB(io) = done_ccb; PRIV_INFO(io) = cmd_info; done_ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path; resets, etc. come * down the immediate notify path below.
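* The translation below fills the CTL nexus from the ATIO: init_id * becomes the initiator, the port's targ_port the target port, and the * target_lun (run through ctl_decode_lun() on PIM_EXTLUNS SIMs) the LUN; * the CDB is then copied into the ctl_io and handed to ctl_queue().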
*/ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = atio->init_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun; } io->scsiio.tag_num = atio->tag_id; switch (atio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, atio->tag_action); break; } if (atio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, atio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len); #ifdef CTLFEDEBUG printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->scsiio.tag_num, io->scsiio.cdb[0]); #endif ctl_queue(io); return; } case XPT_CONT_TARGET_IO: { int srr = 0; uint32_t srr_off = 0; atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio; io = (union ctl_io *)atio->ccb_h.io_ptr; softc->ctios_returned++; #ifdef CTLFEDEBUG printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n", __func__, atio->tag_id, done_ccb->ccb_h.flags); #endif /* * Handle the SRR case, where the data pointer is pushed back (a hack). */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV && done_ccb->csio.msg_ptr != NULL && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED && done_ccb->csio.msg_ptr[1] == 5 && done_ccb->csio.msg_ptr[2] == 0) { srr = 1; srr_off = (done_ccb->csio.msg_ptr[3] << 24) | (done_ccb->csio.msg_ptr[4] << 16) | (done_ccb->csio.msg_ptr[5] << 8) | (done_ccb->csio.msg_ptr[6]); } /* * If we have an SRR and we're still sending data, we * should be able to adjust offsets and cycle again. * This is possible only if the offset falls within this datamove. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) && srr_off >= io->scsiio.kern_rel_offset && srr_off < io->scsiio.kern_rel_offset + io->scsiio.kern_data_len) { io->scsiio.kern_data_resid = io->scsiio.kern_rel_offset + io->scsiio.kern_data_len - srr_off; io->scsiio.ext_data_filled = srr_off; io->scsiio.io_hdr.status = CTL_STATUS_NONE; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; softc->ccbs_freed++; xpt_release_ccb(done_ccb); TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.tqe); xpt_schedule(periph, /*priority*/ 1); break; } /* * If status was being sent, the back end data is now history. * Hack it up and resubmit a new command with the CDB adjusted. * If the SIM does the right thing, all of the resid math * should work. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { softc->ccbs_freed++; xpt_release_ccb(done_ccb); if (ctlfe_adjust_cdb(atio, srr_off) == 0) { done_ccb = (union ccb *)atio; goto resubmit; } /* * Fall through to doom.... */ } if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; /* * If we were sending status back to the initiator, free up * resources.
If we were doing a datamove, call the * datamove done routine. */ if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { softc->ccbs_freed++; xpt_release_ccb(done_ccb); /* * For a wildcard attachment, commands can come in * with a specific target/lun. Reset the target * and LUN fields back to the wildcard values before * we send them back down to the SIM. The SIM has * a wildcard LUN enabled, not whatever target/lun * these happened to be. */ if (softc->flags & CTLFE_LUN_WILDCARD) { atio->ccb_h.target_id = CAM_TARGET_WILDCARD; atio->ccb_h.target_lun = CAM_LUN_WILDCARD; } if (periph->flags & CAM_PERIPH_INVALID) { ctlfe_free_ccb(periph, (union ccb *)atio); } else { mtx_unlock(mtx); xpt_action((union ccb *)atio); return; } } else { struct ctlfe_cmd_info *cmd_info; struct ccb_scsiio *csio; csio = &done_ccb->csio; cmd_info = PRIV_INFO(io); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; /* * Translate CAM status to CTL status. Success * does not change the overall, ctl_io status. In * that case we just set port_status to 0. If we * have a failure, though, set a data phase error * for the overall ctl_io. */ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_REQ_CMP: io->scsiio.kern_data_resid -= csio->dxfer_len; io->io_hdr.port_status = 0; break; default: /* * XXX KDM we probably need to figure out a * standard set of errors that the SIM * drivers should return in the event of a * data transfer failure. A data phase * error will at least point the user to a * data transfer error of some sort. * Hopefully the SIM printed out some * additional information to give the user * a clue what happened. */ io->io_hdr.port_status = 0xbad1; ctl_set_data_phase_error(&io->scsiio); /* * XXX KDM figure out residual. */ break; } /* * If we had to break this S/G list into multiple * pieces, figure out where we are in the list, and * continue sending pieces if necessary. */ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && (io->io_hdr.port_status == 0)) { ccb_flags flags; uint8_t *data_ptr; uint32_t dxfer_len; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT| CAM_TAG_ACTION_VALID); ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); if (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) { printf("%s: tag %04x no status or " "len cdb = %02x\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0]); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ 5 * 1000); csio->ccb_h.flags |= CAM_UNLOCKED; csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; mtx_unlock(mtx); xpt_action((union ccb *)csio); } else { /* * Release the CTIO. The ATIO will be sent back * down to the SIM once we send status. 
*/ softc->ccbs_freed++; xpt_release_ccb(done_ccb); mtx_unlock(mtx); /* Call the backend move done callback */ io->scsiio.be_move_done(io); } return; } break; } case XPT_IMMEDIATE_NOTIFY: { union ctl_io *io; struct ccb_immediate_notify *inot; cam_status status; int send_ctl_io; inot = &done_ccb->cin1; printf("%s: got XPT_IMMEDIATE_NOTIFY status %#x tag %#x " "seq %#x\n", __func__, inot->ccb_h.status, inot->tag_id, inot->seq_id); io = done_ccb->ccb_h.io_ptr; ctl_zero_io(io); send_ctl_io = 1; io->io_hdr.io_type = CTL_IO_TASK; PRIV_CCB(io) = done_ccb; inot->ccb_h.io_ptr = io; io->io_hdr.nexus.initid = inot->initiator_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; } /* XXX KDM should this be the tag_id? */ io->taskio.tag_num = inot->seq_id; status = inot->ccb_h.status & CAM_STATUS_MASK; switch (status) { case CAM_SCSI_BUS_RESET: io->taskio.task_action = CTL_TASK_BUS_RESET; break; case CAM_BDR_SENT: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case CAM_MESSAGE_RECV: switch (inot->arg) { case MSG_ABORT_TASK_SET: io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case MSG_TARGET_RESET: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case MSG_ABORT_TASK: io->taskio.task_action = CTL_TASK_ABORT_TASK; break; case MSG_LOGICAL_UNIT_RESET: io->taskio.task_action = CTL_TASK_LUN_RESET; break; case MSG_CLEAR_TASK_SET: io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case MSG_CLEAR_ACA: io->taskio.task_action = CTL_TASK_CLEAR_ACA; break; case MSG_QUERY_TASK: io->taskio.task_action = CTL_TASK_QUERY_TASK; break; case MSG_QUERY_TASK_SET: io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case MSG_QUERY_ASYNC_EVENT: io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; case MSG_NOOP: send_ctl_io = 0; break; default: xpt_print(periph->path, "%s: unsupported message 0x%x\n", __func__, inot->arg); send_ctl_io = 0; break; } break; case CAM_REQ_ABORTED: /* * This request was sent back by the driver. * XXX KDM what do we do here? */ send_ctl_io = 0; break; case CAM_REQ_INVALID: case CAM_PROVIDE_FAIL: default: /* * We should only get here if we're talking * to a SIM that is target * capable but supports the old API. In * that case, we need to just free the CCB. * If we actually send a notify acknowledge, * it will send that back with an error as * well. */ if ((status != CAM_REQ_INVALID) && (status != CAM_PROVIDE_FAIL)) xpt_print(periph->path, "%s: unsupported CAM status 0x%x\n", __func__, status); ctlfe_free_ccb(periph, done_ccb); goto out; } if (send_ctl_io != 0) { ctl_queue(io); } else { done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } break; } case XPT_NOTIFY_ACKNOWLEDGE: /* * Queue this back down to the SIM as an immediate notify.
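* (The same CCB is recycled: the code below resets its status to * CAM_REQ_INPROG, flips func_code back to XPT_IMMEDIATE_NOTIFY, and * resubmits it with xpt_action().)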
*/ done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; xpt_action(done_ccb); break; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: case XPT_GET_SIM_KNOB_OLD: break; default: panic("%s: unexpected CCB type %#x", __func__, done_ccb->ccb_h.func_code); break; } out: mtx_unlock(mtx); } static void ctlfe_onoffline(void *arg, int online) { struct ctlfe_softc *bus_softc; union ccb *ccb; cam_status status; struct cam_path *path; int set_wwnn; bus_softc = (struct ctlfe_softc *)arg; set_wwnn = 0; status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path!\n", __func__); return; } ccb = xpt_alloc_ccb(); xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; xpt_action(ccb); /* * Copan WWN format: * * Bits 63-60: 0x5 NAA, IEEE registered name * Bits 59-36: 0x000ED5 IEEE Company name assigned to Copan * Bits 35-12: Copan SSN (Sequential Serial Number) * Bits 11-8: Type of port: * 1 == N-Port * 2 == F-Port * 3 == NL-Port * Bits 7-0: 0 == Node Name, >0 == Port Number */ if (online != 0) { if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ #ifdef RANDOM_WWNN uint64_t random_bits; #endif printf("%s: %s current WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s current WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); #ifdef RANDOM_WWNN arc4rand(&random_bits, sizeof(random_bits), 0); #endif /* * XXX KDM this is a bit of a kludge for now. We * take the current WWNN/WWPN from the card, and * replace the company identifier and the NL-Port * indicator and the port number (for the WWPN). * This should be replaced later with ddb_GetWWNN, * or possibly a more centralized scheme. (It * would be nice to have the WWNN/WWPN for each * port stored in the ctl_port structure.) */ #ifdef RANDOM_WWNN ccb->knob.xport_specific.fc.wwnn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000ED5000000000ULL | /* NL-Port */ 0x0300; ccb->knob.xport_specific.fc.wwpn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000ED5000000000ULL | /* NL-Port */ 0x3000 | /* Port Num */ (bus_softc->port.targ_port & 0xff); /* * This is a bit of an API break/reversal, but if * we're doing the random WWNN that's a little * different anyway. So record what we're actually * using with the frontend code so it's reported * accurately. */ ctl_port_set_wwns(&bus_softc->port, true, ccb->knob.xport_specific.fc.wwnn, true, ccb->knob.xport_specific.fc.wwpn); set_wwnn = 1; #else /* RANDOM_WWNN */ /* * If the user has specified a WWNN/WWPN, send them * down to the SIM. Otherwise, record what the SIM * has reported. 
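* As a worked example of the Copan layout above (illustrative SSN): SSN * 0x000001 on an NL-Port yields the node name 0x5000ED5000001300 (NAA 0x5, * company ID 0x000ED5, SSN << 12, port type 3 << 8, low byte 0 for a node * name).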
*/ if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn != ccb->knob.xport_specific.fc.wwnn) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, true, ccb->knob.xport_specific.fc.wwnn, false, 0); } if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn != ccb->knob.xport_specific.fc.wwpn) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, false, 0, true, ccb->knob.xport_specific.fc.wwpn); } #endif /* RANDOM_WWNN */ if (set_wwnn != 0) { printf("%s: %s new WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s new WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); } } else { printf("%s: %s has no valid WWNN/WWPN\n", __func__, bus_softc->port_name); } } ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; if (set_wwnn != 0) ccb->knob.xport_specific.valid |= KNOB_VALID_ADDRESS; if (online != 0) ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; else ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: SIM %s (path id %d) target %s failed with " "status %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, (online != 0) ? "enable" : "disable", ccb->ccb_h.status); } else { printf("%s: SIM %s (path id %d) target %s succeeded\n", __func__, bus_softc->port_name, bus_softc->path_id, (online != 0) ? "enable" : "disable"); } xpt_free_path(path); xpt_free_ccb(ccb); } static void ctlfe_online(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct ctlfe_lun_softc *lun_softc; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; /* * Create the wildcard LUN before bringing the port online. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(lun_softc, M_CTLFE); return; } lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, lun_softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(lun_softc, M_CTLFE); } xpt_path_unlock(path); ctlfe_onoffline(arg, /*online*/ 1); xpt_free_path(path); } static void ctlfe_offline(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; ctlfe_onoffline(arg, /*online*/ 0); /* * Disable the wildcard LUN for this port now that we have taken * the port offline. 
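* (The code below looks up the "ctl" periph on the wildcard path and * invalidates it; ctlfeoninvalidate() then sends the XPT_EN_LUN CCB with * enable cleared.)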
*/ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } xpt_path_lock(path); if ((periph = cam_periph_find(path, "ctl")) != NULL) cam_periph_invalidate(periph); xpt_path_unlock(path); xpt_free_path(path); } /* * This will get called to enable a LUN on every bus that is attached to * CTL. So we only need to create a path/periph for this particular bus. */ static int ctlfe_lun_enable(void *arg, int lun_id) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; cam_status status; bus_softc = (struct ctlfe_softc *)arg; if (bus_softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, bus_softc->target_id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, status); return (1); } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(softc, M_CTLFE); return (0); } softc->parent_softc = bus_softc; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(softc, M_CTLFE); } xpt_path_unlock(path); xpt_free_path(path); return (0); } /* * This will get called when the user removes a LUN to disable that LUN * on every bus that is attached to CTL. */ static int ctlfe_lun_disable(void *arg, int lun_id) { struct ctlfe_softc *softc; struct ctlfe_lun_softc *lun_softc; softc = (struct ctlfe_softc *)arg; if (softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); mtx_lock(&softc->lun_softc_mtx); STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) { struct cam_path *path; path = lun_softc->periph->path; if ((xpt_path_target_id(path) == softc->target_id) && (xpt_path_lun_id(path) == lun_id)) { break; } } if (lun_softc == NULL) { mtx_unlock(&softc->lun_softc_mtx); printf("%s: can't find lun %d\n", __func__, lun_id); return (1); } cam_periph_acquire(lun_softc->periph); mtx_unlock(&softc->lun_softc_mtx); cam_periph_lock(lun_softc->periph); cam_periph_invalidate(lun_softc->periph); cam_periph_unlock(lun_softc->periph); cam_periph_release(lun_softc->periph); return (0); } static void ctlfe_dump_sim(struct cam_sim *sim) { printf("%s%d: max tagged openings: %d, max dev openings: %d\n", sim->sim_name, sim->unit_number, sim->max_tagged_dev_openings, sim->max_dev_openings); } /* * Assumes that the SIM lock is held. */ static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc) { struct ccb_hdr *hdr; struct cam_periph *periph; int num_items; periph = softc->periph; num_items = 0; TAILQ_FOREACH(hdr, &softc->work_queue, periph_links.tqe) { union ctl_io *io = hdr->io_ptr; num_items++; /* * Only regular SCSI I/O is put on the work * queue, so we can print sense here. 
There may be no * sense if it's on the queue for a DMA, but this serves to * print out the CCB as well. * * XXX KDM switch this over to scsi_sense_print() when * CTL is merged in with CAM. */ ctl_io_error_print(io, NULL); /* * Print DMA status if we are DMA_QUEUED. */ if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { xpt_print(periph->path, "Total %u, Current %u, Resid %u\n", io->scsiio.kern_total_len, io->scsiio.kern_data_len, io->scsiio.kern_data_resid); } } xpt_print(periph->path, "%d requests total waiting for CCBs\n", num_items); xpt_print(periph->path, "%ju CCBs outstanding (%ju allocated, %ju " "freed)\n", (uintmax_t)(softc->ccbs_alloced - softc->ccbs_freed), (uintmax_t)softc->ccbs_alloced, (uintmax_t)softc->ccbs_freed); xpt_print(periph->path, "%ju CTIOs outstanding (%ju sent, %ju " "returned)\n", (uintmax_t)(softc->ctios_sent - softc->ctios_returned), softc->ctios_sent, softc->ctios_returned); } /* * Datamove/done routine called by CTL. Put ourselves on the queue to * receive a CCB from CAM so we can queue the continue I/O request down * to the adapter. */ static void ctlfe_datamove(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Unexpected io_type (%d) in ctlfe_datamove", io->io_hdr.io_type)); io->scsiio.ext_data_filled = 0; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.tqe); xpt_schedule(periph, /*priority*/ 1); cam_periph_unlock(periph); } static void ctlfe_done(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; if (io->io_hdr.io_type == CTL_IO_TASK) { /* * Task management commands don't require any further * communication back to the adapter. Requeue the CCB * to the adapter, and free the CTL I/O. */ xpt_print(ccb->ccb_h.path, "%s: returning task I/O " "tag %#x seq %#x\n", __func__, ccb->cin1.tag_id, ccb->cin1.seq_id); /* * Send the notify acknowledge down to the SIM, to let it * know we processed the task management command.
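* (The switch below maps the CTL task status onto a CAM_RSP_TMF_* code in * cna2.arg before dispatching the XPT_NOTIFY_ACKNOWLEDGE.)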
*/ ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: ccb->cna2.arg = CAM_RSP_TMF_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_REJECTED: ccb->cna2.arg = CAM_RSP_TMF_REJECTED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_LUN_DOES_NOT_EXIST: ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: ccb->cna2.arg = CAM_RSP_TMF_FAILED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; } ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8; xpt_action(ccb); } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { if (softc->flags & CTLFE_LUN_WILDCARD) { ccb->ccb_h.target_id = CAM_TARGET_WILDCARD; ccb->ccb_h.target_lun = CAM_LUN_WILDCARD; } if (periph->flags & CAM_PERIPH_INVALID) { ctlfe_free_ccb(periph, ccb); } else { cam_periph_unlock(periph); xpt_action(ccb); return; } } else { io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.tqe); xpt_schedule(periph, /*priority*/ 1); } cam_periph_unlock(periph); } static void ctlfe_dump(void) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *lun_softc; STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { ctlfe_dump_sim(bus_softc->sim); STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) ctlfe_dump_queue(lun_softc); } } Index: projects/clang400-import/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c =================================================================== --- projects/clang400-import/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c (revision 312719) +++ projects/clang400-import/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c (revision 312720) @@ -1,18342 +1,18333 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END * * $FreeBSD$ */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, Joyent, Inc. All rights reserved. * Copyright (c) 2012, 2014 by Delphix. All rights reserved. */ /* * DTrace - Dynamic Tracing for Solaris * * This is the implementation of the Solaris Dynamic Tracing framework * (DTrace). The user-visible interface to DTrace is described at length in * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace * library, the in-kernel DTrace framework, and the DTrace providers are * described in the block comments in the header file. The * internal architecture of DTrace is described in the block comments in the * header file. 
The comments contained within the DTrace * implementation very much assume mastery of all of these sources; if one has * an unanswered question about the implementation, one should consult them * first. * * The functions here are ordered roughly as follows: * * - Probe context functions * - Probe hashing functions * - Non-probe context utility functions * - Matching functions * - Provider-to-Framework API functions * - Probe management functions * - DIF object functions * - Format functions * - Predicate functions * - ECB functions * - Buffer functions * - Enabling functions * - DOF functions * - Anonymous enabling functions * - Consumer state functions * - Helper functions * - Hook functions * - Driver cookbook functions * * Each group of functions begins with a block comment labelled the "DTrace * [Group] Functions", allowing one to find each block by searching forward * on capital-f functions. */ #include #ifndef illumos #include #endif #include #include #include #include #ifdef illumos #include #include #endif #include #include #ifdef illumos #include #endif #include #include #include #include #ifdef illumos #include #include #endif #include #ifdef illumos #include #include #endif #include #ifdef illumos #include #include #endif #include #ifdef illumos #include #include #endif #include #include #include #include "strtolctype.h" /* FreeBSD includes: */ #ifndef illumos #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "dtrace_cddl.h" #include "dtrace_debug.c" #endif /* * DTrace Tunable Variables * * The following variables may be tuned by adding a line to /etc/system that * includes both the name of the DTrace module ("dtrace") and the name of the * variable. For example: * * set dtrace:dtrace_destructive_disallow = 1 * * In general, the only variables that one should be tuning this way are those * that affect system-wide DTrace behavior, and for which the default behavior * is undesirable. Most of these variables are tunable on a per-consumer * basis using DTrace options, and need not be tuned on a system-wide basis. * When tuning these variables, avoid pathological values; while some attempt * is made to verify the integrity of these variables, they are not considered * part of the supported interface to DTrace, and they are therefore not * checked comprehensively. Further, these variables should not be tuned * dynamically via "mdb -kw" or other means; they should only be tuned via * /etc/system. 
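* (That is the illumos mechanism; in this FreeBSD port the equivalent * knobs are loader tunables and the kern.dtrace/debug.dtrace sysctl trees * declared below, e.g. the positive-logic dtrace_allow_destructive * variable defined underneath this comment.)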
*/ int dtrace_destructive_disallow = 0; #ifndef illumos /* Positive logic version of dtrace_destructive_disallow for loader tunable */ int dtrace_allow_destructive = 1; #endif dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); size_t dtrace_difo_maxsize = (256 * 1024); dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024); size_t dtrace_statvar_maxsize = (16 * 1024); size_t dtrace_actions_max = (16 * 1024); size_t dtrace_retain_max = 1024; dtrace_optval_t dtrace_helper_actions_max = 128; dtrace_optval_t dtrace_helper_providers_max = 32; dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); size_t dtrace_strsize_default = 256; dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */ dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ dtrace_optval_t dtrace_nspec_default = 1; dtrace_optval_t dtrace_specsize_default = 32 * 1024; dtrace_optval_t dtrace_stackframes_default = 20; dtrace_optval_t dtrace_ustackframes_default = 20; dtrace_optval_t dtrace_jstackframes_default = 50; dtrace_optval_t dtrace_jstackstrsize_default = 512; int dtrace_msgdsize_max = 128; hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */ hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ int dtrace_devdepth_max = 32; int dtrace_err_verbose; hrtime_t dtrace_deadman_interval = NANOSEC; hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC; #ifndef illumos int dtrace_memstr_max = 4096; #endif /* * DTrace External Variables * * As dtrace(7D) is a kernel module, any DTrace variables are obviously * available to DTrace consumers via the backtick (`) syntax. One of these, * dtrace_zero, is made deliberately so: it is provided as a source of * well-known, zero-filled memory. While this variable is not documented, * it is used by some translators as an implementation detail. */ const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ /* * DTrace Internal Variables */ #ifdef illumos static dev_info_t *dtrace_devi; /* device info */ #endif #ifdef illumos static vmem_t *dtrace_arena; /* probe ID arena */ static vmem_t *dtrace_minor; /* minor number arena */ #else static taskq_t *dtrace_taskq; /* task queue */ static struct unrhdr *dtrace_arena; /* Probe ID number. 
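* (FreeBSD substitutes a unit-number allocator for the illumos vmem * arena used to hand out probe IDs; see the #ifdef illumos branch above.)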
*/ #endif static dtrace_probe_t **dtrace_probes; /* array of all probes */ static int dtrace_nprobes; /* number of probes */ static dtrace_provider_t *dtrace_provider; /* provider list */ static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ static int dtrace_opens; /* number of opens */ static int dtrace_helpers; /* number of helpers */ static int dtrace_getf; /* number of unpriv getf()s */ #ifdef illumos static void *dtrace_softstate; /* softstate pointer */ #endif static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ static int dtrace_toxranges; /* number of toxic ranges */ static int dtrace_toxranges_max; /* size of toxic range array */ static dtrace_anon_t dtrace_anon; /* anonymous enabling */ static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ static kthread_t *dtrace_panicked; /* panicking thread */ static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ static dtrace_genid_t dtrace_probegen; /* current probe generation */ static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */ static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ static int dtrace_dynvar_failclean; /* dynvars failed to clean */ #ifndef illumos static struct mtx dtrace_unr_mtx; MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF); static eventhandler_tag dtrace_kld_load_tag; static eventhandler_tag dtrace_kld_unload_try_tag; #endif /* * DTrace Locking * DTrace is protected by three (relatively coarse-grained) locks: * * (1) dtrace_lock is required to manipulate essentially any DTrace state, * including enabling state, probes, ECBs, consumer state, helper state, * etc. Importantly, dtrace_lock is _not_ required when in probe context; * probe context is lock-free -- synchronization is handled via the * dtrace_sync() cross call mechanism. * * (2) dtrace_provider_lock is required when manipulating provider state, or * when provider state must be held constant. * * (3) dtrace_meta_lock is required when manipulating meta provider state, or * when meta provider state must be held constant. * * The lock ordering between these three locks is dtrace_meta_lock before * dtrace_provider_lock before dtrace_lock. (In particular, there are * several places where dtrace_provider_lock is held by the framework as it * calls into the providers -- which then call back into the framework, * grabbing dtrace_lock.) * * There are two other locks in the mix: mod_lock and cpu_lock. With respect * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical * role as a coarse-grained lock; it is acquired before both of these locks. * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must * be acquired _between_ dtrace_meta_lock and any other DTrace locks. * mod_lock is similar with respect to dtrace_provider_lock in that it must be * acquired _between_ dtrace_provider_lock and dtrace_lock. 
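* In short, the overall acquisition order is: dtrace_meta_lock, then * cpu_lock, then dtrace_provider_lock, then mod_lock, then dtrace_lock.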
 */
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#ifndef illumos
/* XXX FreeBSD hacks. */
#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_DECL(_debug_dtrace);
SYSCTL_DECL(_kern_dtrace);
#endif

#ifdef illumos
#define curcpu	CPU->cpu_id
#endif

/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
	(void (*)(void *, modctl_t *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
	NULL,
	NULL,
	NULL,
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 *
 * These variables should be set dynamically to enable helper tracing.  The
 * only variables that should be set are dtrace_helptrace_enable (which should
 * be set to a non-zero value to allocate helper tracing buffers on the next
 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
 * non-zero value to deallocate helper tracing buffers on the next close of
 * /dev/dtrace).  When (and only when) helper tracing is disabled, the
 * buffer size may also be set via dtrace_helptrace_bufsize.
 */
int			dtrace_helptrace_enable = 0;
int			dtrace_helptrace_disable = 0;
int			dtrace_helptrace_bufsize = 16 * 1024 * 1024;
uint32_t		dtrace_helptrace_nlocals;
static dtrace_helptrace_t *dtrace_helptrace_buffer;
static uint32_t		dtrace_helptrace_next = 0;
static int		dtrace_helptrace_wrapped = 0;

/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
*/ #ifdef DEBUG static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; static const char *dtrace_errlast; static kthread_t *dtrace_errthread; static kmutex_t dtrace_errlock; #endif /* * DTrace Macros and Constants * * These are various macros that are useful in various spots in the * implementation, along with a few random constants that have no meaning * outside of the implementation. There is no real structure to this cpp * mishmash -- but is there ever? */ #define DTRACE_HASHSTR(hash, probe) \ dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) #define DTRACE_HASHNEXT(hash, probe) \ (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) #define DTRACE_HASHPREV(hash, probe) \ (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) #define DTRACE_HASHEQ(hash, lhs, rhs) \ (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) #define DTRACE_AGGHASHSIZE_SLEW 17 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) /* * The key for a thread-local variable consists of the lower 61 bits of the * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never * equal to a variable identifier. This is necessary (but not sufficient) to * assure that global associative arrays never collide with thread-local * variables. To guarantee that they cannot collide, we must also define the * order for keying dynamic variables. That order is: * * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] * * Because the variable-key and the tls-key are in orthogonal spaces, there is * no way for a global variable key signature to match a thread-local key * signature. */ #ifdef illumos #define DTRACE_TLS_THRKEY(where) { \ uint_t intr = 0; \ uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ for (; actv; actv >>= 1) \ intr++; \ ASSERT(intr < (1 << 3)); \ (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ } #else #define DTRACE_TLS_THRKEY(where) { \ solaris_cpu_t *_c = &solaris_cpu[curcpu]; \ uint_t intr = 0; \ uint_t actv = _c->cpu_intr_actv; \ for (; actv; actv >>= 1) \ intr++; \ ASSERT(intr < (1 << 3)); \ (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \ (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ } #endif #define DT_BSWAP_8(x) ((x) & 0xff) #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) #define DT_MASK_LO 0x00000000FFFFFFFFULL #define DTRACE_STORE(type, tomax, offset, what) \ *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); #ifndef __x86 #define DTRACE_ALIGNCHECK(addr, size, flags) \ if (addr & (size - 1)) { \ *flags |= CPU_DTRACE_BADALIGN; \ cpu_core[curcpu].cpuc_dtrace_illval = addr; \ return (0); \ } #else #define DTRACE_ALIGNCHECK(addr, size, flags) #endif /* * Test whether a range of memory starting at testaddr of size testsz falls * within the range of memory described by addr, sz. We take care to avoid * problems with overflow and underflow of the unsigned quantities, and * disallow all negative sizes. Ranges of size 0 are allowed. 
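 *
 * (A worked example of the wrap-around case that the final clause guards
 * against -- editorial, using hypothetical 64-bit values: for
 * testaddr = 0xfffffffffffffff0 and testsz = 0x20, testaddr + testsz
 * wraps to 0x10, so the second clause alone could pass spuriously for a
 * small baseaddr; the third clause, (testaddr) + (testsz) >= (testaddr),
 * rejects exactly this overflow.)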
*/ #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \ (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \ (testaddr) + (testsz) >= (testaddr)) #define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \ do { \ if ((remp) != NULL) { \ *(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \ } \ _NOTE(CONSTCOND) } while (0) /* * Test whether alloc_sz bytes will fit in the scratch region. We isolate * alloc_sz on the righthand side of the comparison in order to avoid overflow * or underflow in the comparison with it. This is simpler than the INRANGE * check above, because we know that the dtms_scratch_ptr is valid in the * range. Allocations of size zero are allowed. */ #define DTRACE_INSCRATCH(mstate, alloc_sz) \ ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ (mstate)->dtms_scratch_ptr >= (alloc_sz)) #define DTRACE_LOADFUNC(bits) \ /*CSTYLED*/ \ uint##bits##_t \ dtrace_load##bits(uintptr_t addr) \ { \ size_t size = bits / NBBY; \ /*CSTYLED*/ \ uint##bits##_t rval; \ int i; \ volatile uint16_t *flags = (volatile uint16_t *) \ &cpu_core[curcpu].cpuc_dtrace_flags; \ \ DTRACE_ALIGNCHECK(addr, size, flags); \ \ for (i = 0; i < dtrace_toxranges; i++) { \ if (addr >= dtrace_toxrange[i].dtt_limit) \ continue; \ \ if (addr + size <= dtrace_toxrange[i].dtt_base) \ continue; \ \ /* \ * This address falls within a toxic region; return 0. \ */ \ *flags |= CPU_DTRACE_BADADDR; \ cpu_core[curcpu].cpuc_dtrace_illval = addr; \ return (0); \ } \ \ *flags |= CPU_DTRACE_NOFAULT; \ /*CSTYLED*/ \ rval = *((volatile uint##bits##_t *)addr); \ *flags &= ~CPU_DTRACE_NOFAULT; \ \ return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ } #ifdef _LP64 #define dtrace_loadptr dtrace_load64 #else #define dtrace_loadptr dtrace_load32 #endif #define DTRACE_DYNHASH_FREE 0 #define DTRACE_DYNHASH_SINK 1 #define DTRACE_DYNHASH_VALID 2 #define DTRACE_MATCH_NEXT 0 #define DTRACE_MATCH_DONE 1 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') #define DTRACE_STATE_ALIGN 64 #define DTRACE_FLAGS2FLT(flags) \ (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ ((flags) & CPU_DTRACE_BADSTACK) ? 
	DTRACEFLT_BADSTACK :					\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)					\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&		\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_priv_proc(dtrace_state_t *);
static void dtrace_getf_barrier(void);
static int dtrace_canload_remains(uint64_t, size_t, size_t *,
    dtrace_mstate_t *, dtrace_vstate_t *);
static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
    dtrace_mstate_t *, dtrace_vstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
#ifdef __FreeBSD__
	vpanic(format, alist);
#else
	dtrace_vpanic(format, alist);
#endif
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.
If these counters * are incremented concurrently on different CPUs, scalability will be * adversely affected -- but we don't expect them to be white-hot in a * correctly constructed enabling... */ uint32_t oval, nval; do { oval = *counter; if ((nval = oval + 1) == 0) { /* * If the counter would wrap, set it to 1 -- assuring * that the counter is never zero when we have seen * errors. (The counter must be 32-bits because we * aren't guaranteed a 64-bit compare&swap operation.) * To save this code both the infamy of being fingered * by a priggish news story and the indignity of being * the target of a neo-puritan witch trial, we're * carefully avoiding any colorful description of the * likelihood of this condition -- but suffice it to * say that it is only slightly more likely than the * overflow of predicate cache IDs, as discussed in * dtrace_predicate_create(). */ nval = 1; } } while (dtrace_cas32(counter, oval, nval) != oval); } /* * Use the DTRACE_LOADFUNC macro to define functions for each of loading a * uint8_t, a uint16_t, a uint32_t and a uint64_t. */ /* BEGIN CSTYLED */ DTRACE_LOADFUNC(8) DTRACE_LOADFUNC(16) DTRACE_LOADFUNC(32) DTRACE_LOADFUNC(64) /* END CSTYLED */ static int dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) { if (dest < mstate->dtms_scratch_base) return (0); if (dest + size < dest) return (0); if (dest + size > mstate->dtms_scratch_ptr) return (0); return (1); } static int dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain, dtrace_statvar_t **svars, int nsvars) { int i; size_t maxglobalsize, maxlocalsize; if (nsvars == 0) return (0); maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t); maxlocalsize = maxglobalsize * NCPU; for (i = 0; i < nsvars; i++) { dtrace_statvar_t *svar = svars[i]; uint8_t scope; size_t size; if (svar == NULL || (size = svar->dtsv_size) == 0) continue; scope = svar->dtsv_var.dtdv_scope; /* * We verify that our size is valid in the spirit of providing * defense in depth: we want to prevent attackers from using * DTrace to escalate an orthogonal kernel heap corruption bug * into the ability to store to arbitrary locations in memory. */ VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) || (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize)); if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) { DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data, svar->dtsv_size); return (1); } } return (0); } /* * Check to see if the address is within a memory region to which a store may * be issued. This includes the DTrace scratch areas, and any DTrace variable * region. The caller of dtrace_canstore() is responsible for performing any * alignment checks that are needed before stores are actually executed. */ static int dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) { return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate)); } /* * Implementation of dtrace_canstore which communicates the upper bound of the * allowed memory region. */ static int dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) { /* * First, check to see if the address is in scratch space... */ if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, mstate->dtms_scratch_size)) { DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base, mstate->dtms_scratch_size); return (1); } /* * Now check to see if it's a dynamic variable. 
This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;
		dtrace_dynvar_t *dvar;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 *	(4) Not be in the tuple space of a dynamic variable
		 *
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);

		if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
			return (0);

		if (chunkoffs < sizeof (dtrace_dynvar_t) +
		    ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
			return (0);

		DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize);
		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz, remain,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz, remain,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}


/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
}

/*
 * Implementation of dtrace_canload which communicates the upper bound of the
 * allowed memory region.
 */
static int
dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
	file_t *fp;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
		return (1);
	}

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen)) {
		DTRACE_RANGE_REMAIN(remain, addr,
		    mstate->dtms_difo->dtdo_strtab,
		    mstate->dtms_difo->dtdo_strlen);
		return (1);
	}

	if (vstate->dtvs_state != NULL &&
	    dtrace_priv_proc(vstate->dtvs_state)) {
		proc_t *p;

		/*
		 * When we have privileges to the current process, there are
		 * several context-related kernel structures that are safe to
		 * read, even absent the privilege to read from kernel memory.
		 * These reads are safe because these structures contain only
		 * state that (1) we're permitted to read, (2) is harmless or
		 * (3) contains pointers to additional kernel state that we're
		 * not permitted to read (and as such, do not present an
		 * opportunity for privilege escalation).  Finally (and
		 * critically), because of the nature of their relation with
		 * the current thread context, the memory associated with these
		 * structures cannot change over the duration of probe context,
		 * and it is therefore impossible for this memory to be
		 * deallocated and reallocated as something else while it's
		 * being operated upon.
		 */
		if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread,
			    sizeof (kthread_t));
			return (1);
		}

		if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
		    sz, curthread->t_procp, sizeof (proc_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp,
			    sizeof (proc_t));
			return (1);
		}

		if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cred, sizeof (cred_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred,
			    sizeof (cred_t));
			return (1);
		}

#ifdef illumos
		if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
		    &(p->p_pidp->pid_id), sizeof (pid_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id),
			    sizeof (pid_t));
			return (1);
		}

		if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
		    curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
			DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu,
			    offsetof(cpu_t, cpu_pause_thread));
			return (1);
		}
#endif
	}

	if ((fp = mstate->dtms_getf) != NULL) {
		uintptr_t psz = sizeof (void *);
		vnode_t *vp;
		vnodeops_t *op;

		/*
		 * When getf() returns a file_t, the enabling is implicitly
		 * granted the (transient) right to read the returned file_t
		 * as well as the v_path and v_op->vnop_name of the underlying
		 * vnode.  These accesses are allowed after a successful
		 * getf() because the members that they refer to cannot change
		 * once set -- and the barrier logic in the kernel's closef()
		 * path assures that the file_t and its referenced vnode_t
		 * cannot themselves be stale (that is, it is impossible for
		 * either dtms_getf itself or its f_vnode member to reference
		 * freed memory).
		 */
		if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) {
			DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t));
			return (1);
		}

		if ((vp = fp->f_vnode) != NULL) {
			size_t slen;
#ifdef illumos
			if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) {
				DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path,
				    psz);
				return (1);
			}
			slen = strlen(vp->v_path) + 1;
			if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) {
				DTRACE_RANGE_REMAIN(remain, addr, vp->v_path,
				    slen);
				return (1);
			}
#endif

			if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) {
				DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op,
				    psz);
				return (1);
			}

#ifdef illumos
			if ((op = vp->v_op) != NULL &&
			    DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
				DTRACE_RANGE_REMAIN(remain, addr,
				    &op->vnop_name, psz);
				return (1);
			}

			if (op != NULL && op->vnop_name != NULL &&
			    DTRACE_INRANGE(addr, sz, op->vnop_name,
			    (slen = strlen(op->vnop_name) + 1))) {
				DTRACE_RANGE_REMAIN(remain, addr,
				    op->vnop_name, slen);
				return (1);
			}
#endif
		}
	}

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
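 *
 * (Editorial note: as with the other *_remains() routines above, a
 * non-NULL remain out-parameter is filled in via DTRACE_RANGE_REMAIN()
 * with the number of valid bytes from addr to the end of whichever
 * region satisfied the check, allowing callers to clamp subsequent safe
 * loads accordingly.)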
*/ static int dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) { size_t rsize; /* * If we hold the privilege to read from kernel memory, then * everything is readable. */ if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { DTRACE_RANGE_REMAIN(remain, addr, addr, sz); return (1); } /* * Even if the caller is uninterested in querying the remaining valid * range, it is required to ensure that the access is allowed. */ if (remain == NULL) { remain = &rsize; } if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) { size_t strsz; /* * Perform the strlen after determining the length of the * memory region which is accessible. This prevents timing * information from being used to find NULs in memory which is * not accessible to the caller. */ strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, MIN(sz, *remain)); if (strsz <= *remain) { return (1); } } return (0); } /* * Convenience routine to check to see if a given variable is within a memory * region in which a load may be issued given the user's privilege level. */ static int dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) { size_t sz; ASSERT(type->dtdt_flags & DIF_TF_BYREF); /* * Calculate the max size before performing any checks since even * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function * return the max length via 'remain'. */ if (type->dtdt_kind == DIF_TYPE_STRING) { dtrace_state_t *state = vstate->dtvs_state; if (state != NULL) { sz = state->dts_options[DTRACEOPT_STRSIZE]; } else { /* * In helper context, we have a NULL state; fall back * to using the system-wide default for the string size * in this case. */ sz = dtrace_strsize_default; } } else { sz = type->dtdt_size; } /* * If we hold the privilege to read from kernel memory, then * everything is readable. */ if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) { DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz); return (1); } if (type->dtdt_kind == DIF_TYPE_STRING) { return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate, vstate)); } return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate, vstate)); } /* * Convert a string to a signed integer using safe loads. * * NOTE: This function uses various macros from strtolctype.h to manipulate * digit values, etc -- these have all been checked to ensure they make * no additional function calls. */ static int64_t dtrace_strtoll(char *input, int base, size_t limit) { uintptr_t pos = (uintptr_t)input; int64_t val = 0; int x; boolean_t neg = B_FALSE; char c, cc, ccc; uintptr_t end = pos + limit; /* * Consume any whitespace preceding digits. */ while ((c = dtrace_load8(pos)) == ' ' || c == '\t') pos++; /* * Handle an explicit sign if one is present. */ if (c == '-' || c == '+') { if (c == '-') neg = B_TRUE; c = dtrace_load8(++pos); } /* * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it * if present. */ if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' || cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) { pos += 2; c = ccc; } /* * Read in contiguous digits until the first non-digit character. */ for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base; c = dtrace_load8(++pos)) val = val * base + x; return (neg ? -val : val); } /* * Compare two strings using safe loads. 
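 *
 * (Editorial note: as the body below shows, a NULL argument is treated
 * as an empty string rather than dereferenced, the return value is the
 * difference of the first mismatched bytes, and the walk terminates
 * early if a safe load faults.)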
*/ static int dtrace_strncmp(char *s1, char *s2, size_t limit) { uint8_t c1, c2; volatile uint16_t *flags; if (s1 == s2 || limit == 0) return (0); flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; do { if (s1 == NULL) { c1 = '\0'; } else { c1 = dtrace_load8((uintptr_t)s1++); } if (s2 == NULL) { c2 = '\0'; } else { c2 = dtrace_load8((uintptr_t)s2++); } if (c1 != c2) return (c1 - c2); } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); return (0); } /* * Compute strlen(s) for a string using safe memory accesses. The additional * len parameter is used to specify a maximum length to ensure completion. */ static size_t dtrace_strlen(const char *s, size_t lim) { uint_t len; for (len = 0; len != lim; len++) { if (dtrace_load8((uintptr_t)s++) == '\0') break; } return (len); } /* * Check if an address falls within a toxic region. */ static int dtrace_istoxic(uintptr_t kaddr, size_t size) { uintptr_t taddr, tsize; int i; for (i = 0; i < dtrace_toxranges; i++) { taddr = dtrace_toxrange[i].dtt_base; tsize = dtrace_toxrange[i].dtt_limit - taddr; if (kaddr - taddr < tsize) { DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); cpu_core[curcpu].cpuc_dtrace_illval = kaddr; return (1); } if (taddr - kaddr < size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); cpu_core[curcpu].cpuc_dtrace_illval = taddr; return (1); } } return (0); } /* * Copy src to dst using safe memory accesses. The src is assumed to be unsafe * memory specified by the DIF program. The dst is assumed to be safe memory * that we can store to directly because it is managed by DTrace. As with * standard bcopy, overlapping copies are handled properly. */ static void dtrace_bcopy(const void *src, void *dst, size_t len) { if (len != 0) { uint8_t *s1 = dst; const uint8_t *s2 = src; if (s1 <= s2) { do { *s1++ = dtrace_load8((uintptr_t)s2++); } while (--len != 0); } else { s2 += len; s1 += len; do { *--s1 = dtrace_load8((uintptr_t)--s2); } while (--len != 0); } } } /* * Copy src to dst using safe memory accesses, up to either the specified * length, or the point that a nul byte is encountered. The src is assumed to * be unsafe memory specified by the DIF program. The dst is assumed to be * safe memory that we can store to directly because it is managed by DTrace. * Unlike dtrace_bcopy(), overlapping regions are not handled. */ static void dtrace_strcpy(const void *src, void *dst, size_t len) { if (len != 0) { uint8_t *s1 = dst, c; const uint8_t *s2 = src; do { *s1++ = c = dtrace_load8((uintptr_t)s2++); } while (--len != 0 && c != '\0'); } } /* * Copy src to dst, deriving the size and type from the specified (BYREF) * variable type. The src is assumed to be unsafe memory specified by the DIF * program. The dst is assumed to be DTrace variable memory that is of the * specified type; we assume that we can store to directly. */ static void dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit) { ASSERT(type->dtdt_flags & DIF_TF_BYREF); if (type->dtdt_kind == DIF_TYPE_STRING) { dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit)); } else { dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit)); } } /* * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be * unsafe memory specified by the DIF program. The s2 data is assumed to be * safe memory that we can access directly because it is managed by DTrace. 
*/ static int dtrace_bcmp(const void *s1, const void *s2, size_t len) { volatile uint16_t *flags; flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; if (s1 == s2) return (0); if (s1 == NULL || s2 == NULL) return (1); if (s1 != s2 && len != 0) { const uint8_t *ps1 = s1; const uint8_t *ps2 = s2; do { if (dtrace_load8((uintptr_t)ps1++) != *ps2++) return (1); } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); } return (0); } /* * Zero the specified region using a simple byte-by-byte loop. Note that this * is for safe DTrace-managed memory only. */ static void dtrace_bzero(void *dst, size_t len) { uchar_t *cp; for (cp = dst; len != 0; len--) *cp++ = 0; } static void dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) { uint64_t result[2]; result[0] = addend1[0] + addend2[0]; result[1] = addend1[1] + addend2[1] + (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); sum[0] = result[0]; sum[1] = result[1]; } /* * Shift the 128-bit value in a by b. If b is positive, shift left. * If b is negative, shift right. */ static void dtrace_shift_128(uint64_t *a, int b) { uint64_t mask; if (b == 0) return; if (b < 0) { b = -b; if (b >= 64) { a[0] = a[1] >> (b - 64); a[1] = 0; } else { a[0] >>= b; mask = 1LL << (64 - b); mask -= 1; a[0] |= ((a[1] & mask) << (64 - b)); a[1] >>= b; } } else { if (b >= 64) { a[1] = a[0] << (b - 64); a[0] = 0; } else { a[1] <<= b; mask = a[0] >> (64 - b); a[1] |= mask; a[0] <<= b; } } } /* * The basic idea is to break the 2 64-bit values into 4 32-bit values, * use native multiplication on those, and then re-combine into the * resulting 128-bit value. * * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = * hi1 * hi2 << 64 + * hi1 * lo2 << 32 + * hi2 * lo1 << 32 + * lo1 * lo2 */ static void dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) { uint64_t hi1, hi2, lo1, lo2; uint64_t tmp[2]; hi1 = factor1 >> 32; hi2 = factor2 >> 32; lo1 = factor1 & DT_MASK_LO; lo2 = factor2 & DT_MASK_LO; product[0] = lo1 * lo2; product[1] = hi1 * hi2; tmp[0] = hi1 * lo2; tmp[1] = 0; dtrace_shift_128(tmp, 32); dtrace_add_128(product, tmp, product); tmp[0] = hi2 * lo1; tmp[1] = 0; dtrace_shift_128(tmp, 32); dtrace_add_128(product, tmp, product); } /* * This privilege check should be used by actions and subroutines to * verify that the user credentials of the process that enabled the * invoking ECB match the target credentials */ static int dtrace_priv_proc_common_user(dtrace_state_t *state) { cred_t *cr, *s_cr = state->dts_cred.dcr_cred; /* * We should always have a non-NULL state cred here, since if cred * is null (anonymous tracing), we fast-path bypass this routine. */ ASSERT(s_cr != NULL); if ((cr = CRED()) != NULL && s_cr->cr_uid == cr->cr_uid && s_cr->cr_uid == cr->cr_ruid && s_cr->cr_uid == cr->cr_suid && s_cr->cr_gid == cr->cr_gid && s_cr->cr_gid == cr->cr_rgid && s_cr->cr_gid == cr->cr_sgid) return (1); return (0); } /* * This privilege check should be used by actions and subroutines to * verify that the zone of the process that enabled the invoking ECB * matches the target credentials */ static int dtrace_priv_proc_common_zone(dtrace_state_t *state) { #ifdef illumos cred_t *cr, *s_cr = state->dts_cred.dcr_cred; /* * We should always have a non-NULL state cred here, since if cred * is null (anonymous tracing), we fast-path bypass this routine. 
*/ ASSERT(s_cr != NULL); if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone) return (1); return (0); #else return (1); #endif } /* * This privilege check should be used by actions and subroutines to * verify that the process has not setuid or changed credentials. */ static int dtrace_priv_proc_common_nocd(void) { proc_t *proc; if ((proc = ttoproc(curthread)) != NULL && !(proc->p_flag & SNOCD)) return (1); return (0); } static int dtrace_priv_proc_destructive(dtrace_state_t *state) { int action = state->dts_cred.dcr_action; if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && dtrace_priv_proc_common_zone(state) == 0) goto bad; if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && dtrace_priv_proc_common_user(state) == 0) goto bad; if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && dtrace_priv_proc_common_nocd() == 0) goto bad; return (1); bad: cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; return (0); } static int dtrace_priv_proc_control(dtrace_state_t *state) { if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) return (1); if (dtrace_priv_proc_common_zone(state) && dtrace_priv_proc_common_user(state) && dtrace_priv_proc_common_nocd()) return (1); cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; return (0); } static int dtrace_priv_proc(dtrace_state_t *state) { if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) return (1); cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; return (0); } static int dtrace_priv_kernel(dtrace_state_t *state) { if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) return (1); cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; return (0); } static int dtrace_priv_kernel_destructive(dtrace_state_t *state) { if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE) return (1); cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; return (0); } /* * Determine if the dte_cond of the specified ECB allows for processing of * the current probe to continue. Note that this routine may allow continued * processing, but with access(es) stripped from the mstate's dtms_access * field. */ static int dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate, dtrace_ecb_t *ecb) { dtrace_probe_t *probe = ecb->dte_probe; dtrace_provider_t *prov = probe->dtpr_provider; dtrace_pops_t *pops = &prov->dtpv_pops; int mode = DTRACE_MODE_NOPRIV_DROP; ASSERT(ecb->dte_cond); #ifdef illumos if (pops->dtps_mode != NULL) { mode = pops->dtps_mode(prov->dtpv_arg, probe->dtpr_id, probe->dtpr_arg); ASSERT((mode & DTRACE_MODE_USER) || (mode & DTRACE_MODE_KERNEL)); ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) || (mode & DTRACE_MODE_NOPRIV_DROP)); } /* * If the dte_cond bits indicate that this consumer is only allowed to * see user-mode firings of this probe, call the provider's dtps_mode() * entry point to check that the probe was fired while in a user * context. If that's not the case, use the policy specified by the * provider to determine if we drop the probe or merely restrict * operation. */ if (ecb->dte_cond & DTRACE_COND_USERMODE) { ASSERT(mode != DTRACE_MODE_NOPRIV_DROP); if (!(mode & DTRACE_MODE_USER)) { if (mode & DTRACE_MODE_NOPRIV_DROP) return (0); mstate->dtms_access &= ~DTRACE_ACCESS_ARGS; } } #endif /* * This is more subtle than it looks. We have to be absolutely certain * that CRED() isn't going to change out from under us so it's only * legit to examine that structure if we're in constrained situations. 
	 * Currently, the only time we'll do this check is if a non-super-user
	 * has enabled the profile or syscall providers -- providers that
	 * allow visibility of all processes.  For the profile case, the check
	 * above will ensure that we're examining a user context.
	 */
	if (ecb->dte_cond & DTRACE_COND_OWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;
		proc_t *proc;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_uid != cr->cr_uid ||
		    s_cr->cr_uid != cr->cr_ruid ||
		    s_cr->cr_uid != cr->cr_suid ||
		    s_cr->cr_gid != cr->cr_gid ||
		    s_cr->cr_gid != cr->cr_rgid ||
		    s_cr->cr_gid != cr->cr_sgid ||
		    (proc = ttoproc(curthread)) == NULL ||
		    (proc->p_flag & SNOCD)) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

#ifdef illumos
			mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
#endif
		}
	}

#ifdef illumos
	/*
	 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
	 * in our zone, check to see if our mode policy is to restrict rather
	 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
	 * and DTRACE_ACCESS_ARGS.
	 */
	if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
		cred_t *cr;
		cred_t *s_cr = state->dts_cred.dcr_cred;

		ASSERT(s_cr != NULL);

		if ((cr = CRED()) == NULL ||
		    s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
			if (mode & DTRACE_MODE_NOPRIV_DROP)
				return (0);

			mstate->dtms_access &=
			    ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
		}
	}
#endif

	return (1);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	dtrace_dynvar_t **rinsep;
	int i, j, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];
		rinsep = &dcpu->dtdsc_rinsing;

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		if (dcpu->dtdsc_rinsing != NULL) {
			/*
			 * If the rinsing list is non-NULL, then it is because
			 * this CPU was selected to accept another CPU's
			 * dirty list -- and since that time, dirty buffers
			 * have accumulated.  This is a highly unlikely
			 * condition, but we choose to ignore the dirty
			 * buffers -- they'll be picked up by a future cleanse.
			 */
			continue;
		}

		if (dcpu->dtdsc_clean != NULL) {
			/*
			 * If the clean list is non-NULL, then we're in a
			 * situation where a CPU has done deallocations (we
			 * have a non-NULL dirty list) but no allocations (we
			 * also have a non-NULL clean list).  We can't simply
			 * move the dirty list into the clean list on this
			 * CPU, yet we also don't want to allow this condition
			 * to persist, lest a short clean list prevent a
			 * massive dirty list from being cleaned (which in
			 * turn could lead to otherwise avoidable dynamic
			 * drops).  To deal with this, we look for some CPU
			 * with a NULL clean list, NULL dirty list, and NULL
			 * rinsing list -- and then we borrow this CPU to
			 * rinse our dirty list.
			 */
			for (j = 0; j < NCPU; j++) {
				dtrace_dstate_percpu_t *rinser;

				rinser = &dstate->dtds_percpu[j];

				if (rinser->dtdsc_rinsing != NULL)
					continue;

				if (rinser->dtdsc_dirty != NULL)
					continue;

				if (rinser->dtdsc_clean != NULL)
					continue;

				rinsep = &rinser->dtdsc_rinsing;
				break;
			}

			if (j == NCPU) {
				/*
				 * We were unable to find another CPU that
				 * could accept this dirty list -- we are
				 * therefore unable to clean it now.
				 */
				dtrace_dynvar_failclean++;
				continue;
			}
		}

		work = 1;

		/*
		 * Atomically move the dirty list aside.
*/ do { dirty = dcpu->dtdsc_dirty; /* * Before we zap the dirty list, set the rinsing list. * (This allows for a potential assertion in * dtrace_dynvar(): if a free dynamic variable appears * on a hash chain, either the dirty list or the * rinsing list for some CPU must be non-NULL.) */ *rinsep = dirty; dtrace_membar_producer(); } while (dtrace_casptr(&dcpu->dtdsc_dirty, dirty, NULL) != dirty); } if (!work) { /* * We have no work to do; we can simply return. */ return; } dtrace_sync(); for (i = 0; i < NCPU; i++) { dcpu = &dstate->dtds_percpu[i]; if (dcpu->dtdsc_rinsing == NULL) continue; /* * We are now guaranteed that no hash chain contains a pointer * into this dirty list; we can make it clean. */ ASSERT(dcpu->dtdsc_clean == NULL); dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; dcpu->dtdsc_rinsing = NULL; } /* * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make * sure that all CPUs have seen all of the dtdsc_clean pointers. * This prevents a race whereby a CPU incorrectly decides that * the state should be something other than DTRACE_DSTATE_CLEAN * after dtrace_dynvar_clean() has completed. */ dtrace_sync(); dstate->dtds_state = DTRACE_DSTATE_CLEAN; } /* * Depending on the value of the op parameter, this function looks-up, * allocates or deallocates an arbitrarily-keyed dynamic variable. If an * allocation is requested, this function will return a pointer to a * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no * variable can be allocated. If NULL is returned, the appropriate counter * will be incremented. */ dtrace_dynvar_t * dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) { uint64_t hashval = DTRACE_DYNHASH_VALID; dtrace_dynhash_t *hash = dstate->dtds_hash; dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; processorid_t me = curcpu, cpu = me; dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; size_t bucket, ksize; size_t chunksize = dstate->dtds_chunksize; uintptr_t kdata, lock, nstate; uint_t i; ASSERT(nkeys != 0); /* * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" * algorithm. For the by-value portions, we perform the algorithm in * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a * bit, and seems to have only a minute effect on distribution. For * the by-reference data, we perform "One-at-a-time" iterating (safely) * over each referenced byte. It's painful to do this, but it's much * better than pathological hash distribution. The efficacy of the * hashing algorithm (and a comparison with other algorithms) may be * found by running the ::dtrace_dynstat MDB dcmd. */ for (i = 0; i < nkeys; i++) { if (key[i].dttk_size == 0) { uint64_t val = key[i].dttk_value; hashval += (val >> 48) & 0xffff; hashval += (hashval << 10); hashval ^= (hashval >> 6); hashval += (val >> 32) & 0xffff; hashval += (hashval << 10); hashval ^= (hashval >> 6); hashval += (val >> 16) & 0xffff; hashval += (hashval << 10); hashval ^= (hashval >> 6); hashval += val & 0xffff; hashval += (hashval << 10); hashval ^= (hashval >> 6); } else { /* * This is incredibly painful, but it beats the hell * out of the alternative. 
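			 *
			 * (For reference -- an editorial restatement of the
			 * hashing code here: each "One-at-a-time" round folds
			 * one input chunk c in as
			 *
			 *	hashval += c;
			 *	hashval += (hashval << 10);
			 *	hashval ^= (hashval >> 6);
			 *
			 * and the final avalanche of << 3, >> 11, << 15 below
			 * is applied once, after every key is consumed.)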
*/ uint64_t j, size = key[i].dttk_size; uintptr_t base = (uintptr_t)key[i].dttk_value; if (!dtrace_canload(base, size, mstate, vstate)) break; for (j = 0; j < size; j++) { hashval += dtrace_load8(base + j); hashval += (hashval << 10); hashval ^= (hashval >> 6); } } } if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) return (NULL); hashval += (hashval << 3); hashval ^= (hashval >> 11); hashval += (hashval << 15); /* * There is a remote chance (ideally, 1 in 2^31) that our hashval * comes out to be one of our two sentinel hash values. If this * actually happens, we set the hashval to be a value known to be a * non-sentinel value. */ if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) hashval = DTRACE_DYNHASH_VALID; /* * Yes, it's painful to do a divide here. If the cycle count becomes * important here, tricks can be pulled to reduce it. (However, it's * critical that hash collisions be kept to an absolute minimum; * they're much more painful than a divide.) It's better to have a * solution that generates few collisions and still keeps things * relatively simple. */ bucket = hashval % dstate->dtds_hashsize; if (op == DTRACE_DYNVAR_DEALLOC) { volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; for (;;) { while ((lock = *lockp) & 1) continue; if (dtrace_casptr((volatile void *)lockp, (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) break; } dtrace_membar_producer(); } top: prev = NULL; lock = hash[bucket].dtdh_lock; dtrace_membar_consumer(); start = hash[bucket].dtdh_chain; ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || start->dtdv_hashval != DTRACE_DYNHASH_FREE || op != DTRACE_DYNVAR_DEALLOC)); for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; dtrace_key_t *dkey = &dtuple->dtt_key[0]; if (dvar->dtdv_hashval != hashval) { if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { /* * We've reached the sink, and therefore the * end of the hash chain; we can kick out of * the loop knowing that we have seen a valid * snapshot of state. */ ASSERT(dvar->dtdv_next == NULL); ASSERT(dvar == &dtrace_dynhash_sink); break; } if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { /* * We've gone off the rails: somewhere along * the line, one of the members of this hash * chain was deleted. Note that we could also * detect this by simply letting this loop run * to completion, as we would eventually hit * the end of the dirty list. However, we * want to avoid running the length of the * dirty list unnecessarily (it might be quite * long), so we catch this as early as * possible by detecting the hash marker. In * this case, we simply set dvar to NULL and * break; the conditional after the loop will * send us back to top. 
*/ dvar = NULL; break; } goto next; } if (dtuple->dtt_nkeys != nkeys) goto next; for (i = 0; i < nkeys; i++, dkey++) { if (dkey->dttk_size != key[i].dttk_size) goto next; /* size or type mismatch */ if (dkey->dttk_size != 0) { if (dtrace_bcmp( (void *)(uintptr_t)key[i].dttk_value, (void *)(uintptr_t)dkey->dttk_value, dkey->dttk_size)) goto next; } else { if (dkey->dttk_value != key[i].dttk_value) goto next; } } if (op != DTRACE_DYNVAR_DEALLOC) return (dvar); ASSERT(dvar->dtdv_next == NULL || dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); if (prev != NULL) { ASSERT(hash[bucket].dtdh_chain != dvar); ASSERT(start != dvar); ASSERT(prev->dtdv_next == dvar); prev->dtdv_next = dvar->dtdv_next; } else { if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar->dtdv_next) != start) { /* * We have failed to atomically swing the * hash table head pointer, presumably because * of a conflicting allocation on another CPU. * We need to reread the hash chain and try * again. */ goto top; } } dtrace_membar_producer(); /* * Now set the hash value to indicate that it's free. */ ASSERT(hash[bucket].dtdh_chain != dvar); dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; dtrace_membar_producer(); /* * Set the next pointer to point at the dirty list, and * atomically swing the dirty pointer to the newly freed dvar. */ do { next = dcpu->dtdsc_dirty; dvar->dtdv_next = next; } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); /* * Finally, unlock this hash bucket. */ ASSERT(hash[bucket].dtdh_lock == lock); ASSERT(lock & 1); hash[bucket].dtdh_lock++; return (NULL); next: prev = dvar; continue; } if (dvar == NULL) { /* * If dvar is NULL, it is because we went off the rails: * one of the elements that we traversed in the hash chain * was deleted while we were traversing it. In this case, * we assert that we aren't doing a dealloc (deallocs lock * the hash bucket to prevent themselves from racing with * one another), and retry the hash chain traversal. */ ASSERT(op != DTRACE_DYNVAR_DEALLOC); goto top; } if (op != DTRACE_DYNVAR_ALLOC) { /* * If we are not to allocate a new variable, we want to * return NULL now. Before we return, check that the value * of the lock word hasn't changed. If it has, we may have * seen an inconsistent snapshot. */ if (op == DTRACE_DYNVAR_NOALLOC) { if (hash[bucket].dtdh_lock != lock) goto top; } else { ASSERT(op == DTRACE_DYNVAR_DEALLOC); ASSERT(hash[bucket].dtdh_lock == lock); ASSERT(lock & 1); hash[bucket].dtdh_lock++; } return (NULL); } /* * We need to allocate a new dynamic variable. The size we need is the * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the * size of any auxiliary key data (rounded up to 8-byte alignment) plus * the size of any referred-to data (dsize). We then round the final * size up to the chunksize for allocation. */ for (ksize = 0, i = 0; i < nkeys; i++) ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); /* * This should be pretty much impossible, but could happen if, say, * strange DIF specified the tuple. Ideally, this should be an * assertion and not an error condition -- but that requires that the * chunksize calculation in dtrace_difo_chunksize() be absolutely * bullet-proof. (That is, it must not be able to be fooled by * malicious DIF.) Given the lack of backwards branches in DIF, * solving this would presumably not amount to solving the Halting * Problem -- but it still seems awfully hard. 
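	 *
	 * (Restated as a formula -- editorial: the check below requires
	 *
	 *	sizeof (dtrace_dynvar_t) + (nkeys - 1) * sizeof (dtrace_key_t)
	 *	    + ksize + dsize <= chunksize
	 *
	 * where ksize is the 8-byte-rounded auxiliary key data size computed
	 * in the loop above.)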
*/ if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + ksize + dsize > chunksize) { dcpu->dtdsc_drops++; return (NULL); } nstate = DTRACE_DSTATE_EMPTY; do { retry: free = dcpu->dtdsc_free; if (free == NULL) { dtrace_dynvar_t *clean = dcpu->dtdsc_clean; void *rval; if (clean == NULL) { /* * We're out of dynamic variable space on * this CPU. Unless we have tried all CPUs, * we'll try to allocate from a different * CPU. */ switch (dstate->dtds_state) { case DTRACE_DSTATE_CLEAN: { void *sp = &dstate->dtds_state; if (++cpu >= NCPU) cpu = 0; if (dcpu->dtdsc_dirty != NULL && nstate == DTRACE_DSTATE_EMPTY) nstate = DTRACE_DSTATE_DIRTY; if (dcpu->dtdsc_rinsing != NULL) nstate = DTRACE_DSTATE_RINSING; dcpu = &dstate->dtds_percpu[cpu]; if (cpu != me) goto retry; (void) dtrace_cas32(sp, DTRACE_DSTATE_CLEAN, nstate); /* * To increment the correct bean * counter, take another lap. */ goto retry; } case DTRACE_DSTATE_DIRTY: dcpu->dtdsc_dirty_drops++; break; case DTRACE_DSTATE_RINSING: dcpu->dtdsc_rinsing_drops++; break; case DTRACE_DSTATE_EMPTY: dcpu->dtdsc_drops++; break; } DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); return (NULL); } /* * The clean list appears to be non-empty. We want to * move the clean list to the free list; we start by * moving the clean pointer aside. */ if (dtrace_casptr(&dcpu->dtdsc_clean, clean, NULL) != clean) { /* * We are in one of two situations: * * (a) The clean list was switched to the * free list by another CPU. * * (b) The clean list was added to by the * cleansing cyclic. * * In either of these situations, we can * just reattempt the free list allocation. */ goto retry; } ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); /* * Now we'll move the clean list to our free list. * It's impossible for this to fail: the only way * the free list can be updated is through this * code path, and only one CPU can own the clean list. * Thus, it would only be possible for this to fail if * this code were racing with dtrace_dynvar_clean(). * (That is, if dtrace_dynvar_clean() updated the clean * list, and we ended up racing to update the free * list.) This race is prevented by the dtrace_sync() * in dtrace_dynvar_clean() -- which flushes the * owners of the clean lists out before resetting * the clean lists. */ dcpu = &dstate->dtds_percpu[me]; rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); ASSERT(rval == NULL); goto retry; } dvar = free; new_free = dvar->dtdv_next; } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); /* * We have now allocated a new chunk. We copy the tuple keys into the * tuple array and copy any referenced key data into the data space * following the tuple array. As we do this, we relocate dttk_value * in the final tuple to point to the key data address in the chunk. */ kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; dvar->dtdv_data = (void *)(kdata + ksize); dvar->dtdv_tuple.dtt_nkeys = nkeys; for (i = 0; i < nkeys; i++) { dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; size_t kesize = key[i].dttk_size; if (kesize != 0) { dtrace_bcopy( (const void *)(uintptr_t)key[i].dttk_value, (void *)kdata, kesize); dkey->dttk_value = kdata; kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); } else { dkey->dttk_value = key[i].dttk_value; } dkey->dttk_size = kesize; } ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); dvar->dtdv_hashval = hashval; dvar->dtdv_next = start; if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) return (dvar); /* * The cas has failed. 
Either another CPU is adding an element to * this hash chain, or another CPU is deleting an element from this * hash chain. The simplest way to deal with both of these cases * (though not necessarily the most efficient) is to free our * allocated block and re-attempt it all. Note that the free is * to the dirty list and _not_ to the free list. This is to prevent * races with allocators, above. */ dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; dtrace_membar_producer(); do { free = dcpu->dtdsc_dirty; dvar->dtdv_next = free; } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); goto top; } /*ARGSUSED*/ static void dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) { if ((int64_t)nval < (int64_t)*oval) *oval = nval; } /*ARGSUSED*/ static void dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) { if ((int64_t)nval > (int64_t)*oval) *oval = nval; } static void dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) { int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; int64_t val = (int64_t)nval; if (val < 0) { for (i = 0; i < zero; i++) { if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { quanta[i] += incr; return; } } } else { for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { quanta[i - 1] += incr; return; } } quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; return; } ASSERT(0); } static void dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) { uint64_t arg = *lquanta++; int32_t base = DTRACE_LQUANTIZE_BASE(arg); uint16_t step = DTRACE_LQUANTIZE_STEP(arg); uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); int32_t val = (int32_t)nval, level; ASSERT(step != 0); ASSERT(levels != 0); if (val < base) { /* * This is an underflow. */ lquanta[0] += incr; return; } level = (val - base) / step; if (level < levels) { lquanta[level + 1] += incr; return; } /* * This is an overflow. */ lquanta[levels + 1] += incr; } static int dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, uint16_t high, uint16_t nsteps, int64_t value) { int64_t this = 1, last, next; int base = 1, order; ASSERT(factor <= nsteps); ASSERT(nsteps % factor == 0); for (order = 0; order < low; order++) this *= factor; /* * If our value is less than our factor taken to the power of the * low order of magnitude, it goes into the zeroth bucket. */ if (value < (last = this)) return (0); for (this *= factor; order <= high; order++) { int nbuckets = this > nsteps ? nsteps : this; if ((next = this * factor) < this) { /* * We should not generally get log/linear quantizations * with a high magnitude that allows 64-bits to * overflow, but we nonetheless protect against this * by explicitly checking for overflow, and clamping * our value accordingly. */ value = this - 1; } if (value < this) { /* * If our value lies within this order of magnitude, * determine its position by taking the offset within * the order of magnitude, dividing by the bucket * width, and adding to our (accumulated) base. */ return (base + (value - last) / (this / nbuckets)); } base += nbuckets - (nbuckets / factor); last = this; this = next; } /* * Our value is greater than or equal to our factor taken to the * power of one plus the high magnitude -- return the top bucket. 
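	 *
	 * (A worked example -- editorial, with hypothetical parameters
	 * factor = 10, low = 0, high = 2, nsteps = 10: bucket 0 holds values
	 * below 1; each decade [1, 10), [10, 100) and [100, 1000) contributes
	 * nsteps - nsteps / factor = 9 buckets; and anything >= 1000 lands in
	 * the single top bucket, for 29 buckets in all.)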
	 */
	return (base);
}

static void
dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
{
	uint64_t arg = *llquanta++;
	uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
	uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
	uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
	uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);

	llquanta[dtrace_aggregate_llquantize_bucket(factor,
	    low, high, nsteps, nval)] += incr;
}

/*ARGSUSED*/
static void
dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
{
	data[0]++;
	data[1] += nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
{
	int64_t snval = (int64_t)nval;
	uint64_t tmp[2];

	data[0]++;
	data[1] += nval;

	/*
	 * What we want to say here is:
	 *
	 * data[2] += nval * nval;
	 *
	 * But given that nval is 64-bit, we could easily overflow, so
	 * we do this as 128-bit arithmetic.
	 */
	if (snval < 0)
		snval = -snval;

	dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
	dtrace_add_128(data + 2, tmp, data + 2);
}

/*ARGSUSED*/
static void
dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval = *oval + 1;
}

/*ARGSUSED*/
static void
dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	*oval += nval;
}

/*
 * Aggregate given the tuple in the principal data buffer, and the aggregating
 * action denoted by the specified dtrace_aggregation_t.  The aggregation
 * buffer is specified as the buf parameter.  This routine does not return
 * failure; if there is no space in the aggregation buffer, the data will be
 * dropped, and a corresponding counter incremented.
 */
static void
dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
    intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
{
	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
	uint32_t i, ndx, size, fsize;
	uint32_t align = sizeof (uint64_t) - 1;
	dtrace_aggbuffer_t *agb;
	dtrace_aggkey_t *key;
	uint32_t hashval = 0, limit, isstr;
	caddr_t tomax, data, kdata;
	dtrace_actkind_t action;
	dtrace_action_t *act;
	uintptr_t offs;

	if (buf == NULL)
		return;

	if (!agg->dtag_hasarg) {
		/*
		 * Currently, only quantize() and lquantize() take additional
		 * arguments, and they have the same semantics:  an increment
		 * value that defaults to 1 when not present.  If additional
		 * aggregating actions take arguments, the setting of the
		 * default argument value will presumably have to become more
		 * sophisticated...
		 */
		arg = 1;
	}

	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
	size = rec->dtrd_offset - agg->dtag_base;
	fsize = size + rec->dtrd_size;

	ASSERT(dbuf->dtb_tomax != NULL);
	data = dbuf->dtb_tomax + offset + agg->dtag_base;

	if ((tomax = buf->dtb_tomax) == NULL) {
		dtrace_buffer_drop(buf);
		return;
	}

	/*
	 * The metastructure is always at the bottom of the buffer.
	 */
	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
	    sizeof (dtrace_aggbuffer_t));

	if (buf->dtb_offset == 0) {
		/*
		 * We just kludge up approximately 1/8th of the size to be
		 * buckets.  If this guess ends up being routinely
		 * off-the-mark, we may need to dynamically readjust this
		 * based on past performance.
		 */
		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);

		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
		    (uintptr_t)tomax || hashsize == 0) {
			/*
			 * We've been given a ludicrously small buffer;
			 * increment our drop count and leave.
			 */
			dtrace_buffer_drop(buf);
			return;
		}

		/*
		 * And now, a pathetic attempt to try to get an odd (or
		 * perchance, a prime) hash size for better hash distribution.
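		 *
		 * (Editorial note: hashsize as computed above is the quotient
		 * of typically power-of-two quantities and hence usually
		 * even; when the size is large enough to spare it, subtracting
		 * the odd slew of 17 below yields an odd bucket count whenever
		 * the original count was even.)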
*/ if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) hashsize -= DTRACE_AGGHASHSIZE_SLEW; agb->dtagb_hashsize = hashsize; agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); agb->dtagb_free = (uintptr_t)agb->dtagb_hash; for (i = 0; i < agb->dtagb_hashsize; i++) agb->dtagb_hash[i] = NULL; } ASSERT(agg->dtag_first != NULL); ASSERT(agg->dtag_first->dta_intuple); /* * Calculate the hash value based on the key. Note that we _don't_ * include the aggid in the hashing (but we will store it as part of * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" * algorithm: a simple, quick algorithm that has no known funnels, and * gets good distribution in practice. The efficacy of the hashing * algorithm (and a comparison with other algorithms) may be found by * running the ::dtrace_aggstat MDB dcmd. */ for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { i = act->dta_rec.dtrd_offset - agg->dtag_base; limit = i + act->dta_rec.dtrd_size; ASSERT(limit <= size); isstr = DTRACEACT_ISSTRING(act); for (; i < limit; i++) { hashval += data[i]; hashval += (hashval << 10); hashval ^= (hashval >> 6); if (isstr && data[i] == '\0') break; } } hashval += (hashval << 3); hashval ^= (hashval >> 11); hashval += (hashval << 15); /* * Yes, the divide here is expensive -- but it's generally the least * of the performance issues given the amount of data that we iterate * over to compute hash values, compare data, etc. */ ndx = hashval % agb->dtagb_hashsize; for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { ASSERT((caddr_t)key >= tomax); ASSERT((caddr_t)key < tomax + buf->dtb_size); if (hashval != key->dtak_hashval || key->dtak_size != size) continue; kdata = key->dtak_data; ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { i = act->dta_rec.dtrd_offset - agg->dtag_base; limit = i + act->dta_rec.dtrd_size; ASSERT(limit <= size); isstr = DTRACEACT_ISSTRING(act); for (; i < limit; i++) { if (kdata[i] != data[i]) goto next; if (isstr && data[i] == '\0') break; } } if (action != key->dtak_action) { /* * We are aggregating on the same value in the same * aggregation with two different aggregating actions. * (This should have been picked up in the compiler, * so we may be dealing with errant or devious DIF.) * This is an error condition; we indicate as much, * and return. */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); return; } /* * This is a hit: we need to apply the aggregator to * the value at this key. */ agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); return; next: continue; } /* * We didn't find it. We need to allocate some zero-filled space, * link it into the hash table appropriately, and apply the aggregator * to the (zero-filled) value. */ offs = buf->dtb_offset; while (offs & (align - 1)) offs += sizeof (uint32_t); /* * If we don't have enough room to both allocate a new key _and_ * its associated data, increment the drop count and return. */ if ((uintptr_t)tomax + offs + fsize > agb->dtagb_free - sizeof (dtrace_aggkey_t)) { dtrace_buffer_drop(buf); return; } /*CONSTCOND*/ ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); agb->dtagb_free -= sizeof (dtrace_aggkey_t); key->dtak_data = kdata = tomax + offs; buf->dtb_offset = offs + fsize; /* * Now copy the data across. 
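* The completed key is laid out as [ dtrace_aggid_t | tuple data | aggregated value ], with the value seeded from dtag_initial -- a layout note inferred from the copy loops that follow, not from the original comment.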
*/ *((dtrace_aggid_t *)kdata) = agg->dtag_id; for (i = sizeof (dtrace_aggid_t); i < size; i++) kdata[i] = data[i]; /* * Because strings are not zeroed out by default, we need to iterate * looking for actions that store strings, and we need to explicitly * pad these strings out with zeroes. */ for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { int nul; if (!DTRACEACT_ISSTRING(act)) continue; i = act->dta_rec.dtrd_offset - agg->dtag_base; limit = i + act->dta_rec.dtrd_size; ASSERT(limit <= size); for (nul = 0; i < limit; i++) { if (nul) { kdata[i] = '\0'; continue; } if (data[i] != '\0') continue; nul = 1; } } for (i = size; i < fsize; i++) kdata[i] = 0; key->dtak_hashval = hashval; key->dtak_size = size; key->dtak_action = action; key->dtak_next = agb->dtagb_hash[ndx]; agb->dtagb_hash[ndx] = key; /* * Finally, apply the aggregator. */ *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); } /* * Given consumer state, this routine finds a speculation in the INACTIVE * state and transitions it into the ACTIVE state. If there is no speculation * in the INACTIVE state, 0 is returned. In this case, no error counter is * incremented -- it is up to the caller to take appropriate action. */ static int dtrace_speculation(dtrace_state_t *state) { int i = 0; dtrace_speculation_state_t current; uint32_t *stat = &state->dts_speculations_unavail, count; while (i < state->dts_nspeculations) { dtrace_speculation_t *spec = &state->dts_speculations[i]; current = spec->dtsp_state; if (current != DTRACESPEC_INACTIVE) { if (current == DTRACESPEC_COMMITTINGMANY || current == DTRACESPEC_COMMITTING || current == DTRACESPEC_DISCARDING) stat = &state->dts_speculations_busy; i++; continue; } if (dtrace_cas32((uint32_t *)&spec->dtsp_state, current, DTRACESPEC_ACTIVE) == current) return (i + 1); } /* * We couldn't find a speculation. If we found as much as a single * busy speculation buffer, we'll attribute this failure as "busy" * instead of "unavail". */ do { count = *stat; } while (dtrace_cas32(stat, count, count + 1) != count); return (0); } /* * This routine commits an active speculation. If the specified speculation * is not in a valid state to perform a commit(), this routine will silently do * nothing. The state of the specified speculation is transitioned according * to the state transition diagram outlined in <sys/dtrace_impl.h>. */ static void dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, dtrace_specid_t which) { dtrace_speculation_t *spec; dtrace_buffer_t *src, *dest; uintptr_t daddr, saddr, dlimit, slimit; dtrace_speculation_state_t current, new = 0; intptr_t offs; uint64_t timestamp; if (which == 0) return; if (which > state->dts_nspeculations) { cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; return; } spec = &state->dts_speculations[which - 1]; src = &spec->dtsp_buffer[cpu]; dest = &state->dts_buffer[cpu]; do { current = spec->dtsp_state; if (current == DTRACESPEC_COMMITTINGMANY) break; switch (current) { case DTRACESPEC_INACTIVE: case DTRACESPEC_DISCARDING: return; case DTRACESPEC_COMMITTING: /* * This is only possible if we are (a) commit()'ing * without having done a prior speculate() on this CPU * and (b) racing with another commit() on a different * CPU. There's nothing to do -- we just assert that * our offset is 0. */ ASSERT(src->dtb_offset == 0); return; case DTRACESPEC_ACTIVE: new = DTRACESPEC_COMMITTING; break; case DTRACESPEC_ACTIVEONE: /* * This speculation is active on one CPU.
If our * buffer offset is non-zero, we know that the one CPU * must be us. Otherwise, we are committing on a * different CPU from the speculate(), and we must * rely on being asynchronously cleaned. */ if (src->dtb_offset != 0) { new = DTRACESPEC_COMMITTING; break; } /*FALLTHROUGH*/ case DTRACESPEC_ACTIVEMANY: new = DTRACESPEC_COMMITTINGMANY; break; default: ASSERT(0); } } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new) != current); /* * We have set the state to indicate that we are committing this * speculation. Now reserve the necessary space in the destination * buffer. */ if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, sizeof (uint64_t), state, NULL)) < 0) { dtrace_buffer_drop(dest); goto out; } /* * We have sufficient space to copy the speculative buffer into the * primary buffer. First, modify the speculative buffer, filling * in the timestamp of all entries with the current time. The data * must have the commit() time rather than the time it was traced, * so that all entries in the primary buffer are in timestamp order. */ timestamp = dtrace_gethrtime(); saddr = (uintptr_t)src->dtb_tomax; slimit = saddr + src->dtb_offset; while (saddr < slimit) { size_t size; dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { saddr += sizeof (dtrace_epid_t); continue; } ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; ASSERT3U(saddr + size, <=, slimit); ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); saddr += size; } /* * Copy the buffer across. (Note that this is a * highly suboptimal bcopy(); in the unlikely event that this becomes * a serious performance issue, a high-performance DTrace-specific * bcopy() should obviously be invented.) */ daddr = (uintptr_t)dest->dtb_tomax + offs; dlimit = daddr + src->dtb_offset; saddr = (uintptr_t)src->dtb_tomax; /* * First, the aligned portion. */ while (dlimit - daddr >= sizeof (uint64_t)) { *((uint64_t *)daddr) = *((uint64_t *)saddr); daddr += sizeof (uint64_t); saddr += sizeof (uint64_t); } /* * Now any left-over bit... */ while (dlimit - daddr) *((uint8_t *)daddr++) = *((uint8_t *)saddr++); /* * Finally, commit the reserved space in the destination buffer. */ dest->dtb_offset = offs + src->dtb_offset; out: /* * If we're lucky enough to be the only active CPU on this speculation * buffer, we can just set the state back to DTRACESPEC_INACTIVE. */ if (current == DTRACESPEC_ACTIVE || (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); ASSERT(rval == DTRACESPEC_COMMITTING); } src->dtb_offset = 0; src->dtb_xamot_drops += src->dtb_drops; src->dtb_drops = 0; } /* * This routine discards an active speculation. If the specified speculation * is not in a valid state to perform a discard(), this routine will silently * do nothing.
The state of the specified speculation is transitioned * according to the state transition diagram outlined in <sys/dtrace_impl.h>. */ static void dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, dtrace_specid_t which) { dtrace_speculation_t *spec; dtrace_speculation_state_t current, new = 0; dtrace_buffer_t *buf; if (which == 0) return; if (which > state->dts_nspeculations) { cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; return; } spec = &state->dts_speculations[which - 1]; buf = &spec->dtsp_buffer[cpu]; do { current = spec->dtsp_state; switch (current) { case DTRACESPEC_INACTIVE: case DTRACESPEC_COMMITTINGMANY: case DTRACESPEC_COMMITTING: case DTRACESPEC_DISCARDING: return; case DTRACESPEC_ACTIVE: case DTRACESPEC_ACTIVEMANY: new = DTRACESPEC_DISCARDING; break; case DTRACESPEC_ACTIVEONE: if (buf->dtb_offset != 0) { new = DTRACESPEC_INACTIVE; } else { new = DTRACESPEC_DISCARDING; } break; default: ASSERT(0); } } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new) != current); buf->dtb_offset = 0; buf->dtb_drops = 0; } /* * Note: not called from probe context. This function is called * asynchronously from cross call context to clean any speculations that are * in the COMMITTINGMANY or DISCARDING states. These speculations may not be * transitioned back to the INACTIVE state until all CPUs have cleaned the * speculation. */ static void dtrace_speculation_clean_here(dtrace_state_t *state) { dtrace_icookie_t cookie; processorid_t cpu = curcpu; dtrace_buffer_t *dest = &state->dts_buffer[cpu]; dtrace_specid_t i; cookie = dtrace_interrupt_disable(); if (dest->dtb_tomax == NULL) { dtrace_interrupt_enable(cookie); return; } for (i = 0; i < state->dts_nspeculations; i++) { dtrace_speculation_t *spec = &state->dts_speculations[i]; dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; if (src->dtb_tomax == NULL) continue; if (spec->dtsp_state == DTRACESPEC_DISCARDING) { src->dtb_offset = 0; continue; } if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) continue; if (src->dtb_offset == 0) continue; dtrace_speculation_commit(state, cpu, i + 1); } dtrace_interrupt_enable(cookie); } /* * Note: not called from probe context. This function is called * asynchronously (and at a regular interval) to clean any speculations that * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there * is work to be done, it cross calls all CPUs to perform that work; * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to * the INACTIVE state until they have been cleaned by all CPUs. */ static void dtrace_speculation_clean(dtrace_state_t *state) { int work = 0, rv; dtrace_specid_t i; for (i = 0; i < state->dts_nspeculations; i++) { dtrace_speculation_t *spec = &state->dts_speculations[i]; ASSERT(!spec->dtsp_cleaning); if (spec->dtsp_state != DTRACESPEC_DISCARDING && spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) continue; work++; spec->dtsp_cleaning = 1; } if (!work) return; dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_speculation_clean_here, state); /* * We now know that all CPUs have committed or discarded their * speculation buffers, as appropriate. We can now set the state * to inactive.
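* * (For orientation, the lifecycle implied by the code above, roughly: speculation() moves INACTIVE to ACTIVE; speculate() moves ACTIVE to ACTIVEONE and, on a second CPU, ACTIVEONE to ACTIVEMANY; commit() moves these to COMMITTING or COMMITTINGMANY; discard() moves them to DISCARDING; and the cleaner returns COMMITTINGMANY and DISCARDING to INACTIVE.)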
*/ for (i = 0; i < state->dts_nspeculations; i++) { dtrace_speculation_t *spec = &state->dts_speculations[i]; dtrace_speculation_state_t current, new; if (!spec->dtsp_cleaning) continue; current = spec->dtsp_state; ASSERT(current == DTRACESPEC_DISCARDING || current == DTRACESPEC_COMMITTINGMANY); new = DTRACESPEC_INACTIVE; rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); ASSERT(rv == current); spec->dtsp_cleaning = 0; } } /* * Called as part of a speculate() to get the speculative buffer associated * with a given speculation. Returns NULL if the specified speculation is not * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and * the active CPU is not the specified CPU -- the speculation will be * atomically transitioned into the ACTIVEMANY state. */ static dtrace_buffer_t * dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, dtrace_specid_t which) { dtrace_speculation_t *spec; dtrace_speculation_state_t current, new = 0; dtrace_buffer_t *buf; if (which == 0) return (NULL); if (which > state->dts_nspeculations) { cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; return (NULL); } spec = &state->dts_speculations[which - 1]; buf = &spec->dtsp_buffer[cpuid]; do { current = spec->dtsp_state; switch (current) { case DTRACESPEC_INACTIVE: case DTRACESPEC_COMMITTINGMANY: case DTRACESPEC_DISCARDING: return (NULL); case DTRACESPEC_COMMITTING: ASSERT(buf->dtb_offset == 0); return (NULL); case DTRACESPEC_ACTIVEONE: /* * This speculation is currently active on one CPU. * Check the offset in the buffer; if it's non-zero, * that CPU must be us (and we leave the state alone). * If it's zero, assume that we're starting on a new * CPU -- and change the state to indicate that the * speculation is active on more than one CPU. */ if (buf->dtb_offset != 0) return (buf); new = DTRACESPEC_ACTIVEMANY; break; case DTRACESPEC_ACTIVEMANY: return (buf); case DTRACESPEC_ACTIVE: new = DTRACESPEC_ACTIVEONE; break; default: ASSERT(0); } } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new) != current); ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); return (buf); } /* * Return a string. In the event that the user lacks the privilege to access * arbitrary kernel memory, we copy the string out to scratch memory so that we * don't fail access checking. * * dtrace_dif_variable() uses this routine as a helper for various * builtin values such as 'execname' and 'probefunc.' */ uintptr_t dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, dtrace_mstate_t *mstate) { uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t ret; size_t strsz; /* * The easy case: this probe is allowed to read all of memory, so * we can just return this as a vanilla pointer. */ if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) return (addr); /* * This is the tougher case: we copy the string in question from * kernel memory into scratch memory and return it that way: this * ensures that we won't trip up when access checking tests the * BYREF return value. */ strsz = dtrace_strlen((char *)addr, size) + 1; if (mstate->dtms_scratch_ptr + strsz > mstate->dtms_scratch_base + mstate->dtms_scratch_size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); return (0); } dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, strsz); ret = mstate->dtms_scratch_ptr; mstate->dtms_scratch_ptr += strsz; return (ret); } /* * Return a string from a memory address which is known to have one or * more concatenated, individually zero-terminated, sub-strings.
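* For example (an illustrative case, not from the original comment): an execargs vector stored as "ls" NUL "-l" NUL "/tmp" NUL comes back as the single string "ls -l /tmp", each interior NUL having been replaced by a space.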
* In the event that the user lacks the privilege to access * arbitrary kernel memory, we copy the string out to scratch memory so that we * don't fail access checking. * * dtrace_dif_variable() uses this routine as a helper for various * builtin values such as 'execargs'. */ static uintptr_t dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, dtrace_mstate_t *mstate) { char *p; size_t i; uintptr_t ret; if (mstate->dtms_scratch_ptr + strsz > mstate->dtms_scratch_base + mstate->dtms_scratch_size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); return (0); } dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, strsz); /* Replace sub-string termination characters with a space. */ for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; p++, i++) if (*p == '\0') *p = ' '; ret = mstate->dtms_scratch_ptr; mstate->dtms_scratch_ptr += strsz; return (ret); } /* * This function implements the DIF emulator's variable lookups. The emulator * passes a reserved variable identifier and optional built-in array index. */ static uint64_t dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, uint64_t ndx) { /* * If we're accessing one of the uncached arguments, we'll turn this * into a reference in the args array. */ if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { ndx = v - DIF_VAR_ARG0; v = DIF_VAR_ARGS; } switch (v) { case DIF_VAR_ARGS: ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); if (ndx >= sizeof (mstate->dtms_arg) / sizeof (mstate->dtms_arg[0])) { int aframes = mstate->dtms_probe->dtpr_aframes + 2; dtrace_provider_t *pv; uint64_t val; pv = mstate->dtms_probe->dtpr_provider; if (pv->dtpv_pops.dtps_getargval != NULL) val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, mstate->dtms_probe->dtpr_id, mstate->dtms_probe->dtpr_arg, ndx, aframes); else val = dtrace_getarg(ndx, aframes); /* * This is regrettably required to keep the compiler * from tail-optimizing the call to dtrace_getarg(). * The condition always evaluates to true, but the * compiler has no way of figuring that out a priori. * (None of this would be necessary if the compiler * could be relied upon to _always_ tail-optimize * the call to dtrace_getarg() -- but it can't.) 
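* (Concretely: the test below reads mstate->dtms_probe only to force a use after the call; it is always non-NULL at this point, as the unreachable ASSERT(0) arm attests.)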
*/ if (mstate->dtms_probe != NULL) return (val); ASSERT(0); } return (mstate->dtms_arg[ndx]); #ifdef illumos case DIF_VAR_UREGS: { klwp_t *lwp; if (!dtrace_priv_proc(state)) return (0); if ((lwp = curthread->t_lwp) == NULL) { DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); cpu_core[curcpu].cpuc_dtrace_illval = NULL; return (0); } return (dtrace_getreg(lwp->lwp_regs, ndx)); } #else case DIF_VAR_UREGS: { struct trapframe *tframe; if (!dtrace_priv_proc(state)) return (0); if ((tframe = curthread->td_frame) == NULL) { DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); cpu_core[curcpu].cpuc_dtrace_illval = 0; return (0); } return (dtrace_getreg(tframe, ndx)); } #endif case DIF_VAR_CURTHREAD: if (!dtrace_priv_proc(state)) return (0); return ((uint64_t)(uintptr_t)curthread); case DIF_VAR_TIMESTAMP: if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { mstate->dtms_timestamp = dtrace_gethrtime(); mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; } return (mstate->dtms_timestamp); case DIF_VAR_VTIMESTAMP: ASSERT(dtrace_vtime_references != 0); return (curthread->t_dtrace_vtime); case DIF_VAR_WALLTIMESTAMP: if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { mstate->dtms_walltimestamp = dtrace_gethrestime(); mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; } return (mstate->dtms_walltimestamp); #ifdef illumos case DIF_VAR_IPL: if (!dtrace_priv_kernel(state)) return (0); if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { mstate->dtms_ipl = dtrace_getipl(); mstate->dtms_present |= DTRACE_MSTATE_IPL; } return (mstate->dtms_ipl); #endif case DIF_VAR_EPID: ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); return (mstate->dtms_epid); case DIF_VAR_ID: ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); return (mstate->dtms_probe->dtpr_id); case DIF_VAR_STACKDEPTH: if (!dtrace_priv_kernel(state)) return (0); if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { int aframes = mstate->dtms_probe->dtpr_aframes + 2; mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; } return (mstate->dtms_stackdepth); case DIF_VAR_USTACKDEPTH: if (!dtrace_priv_proc(state)) return (0); if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) { mstate->dtms_ustackdepth = 0; } else { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); mstate->dtms_ustackdepth = dtrace_getustackdepth(); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); } mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; } return (mstate->dtms_ustackdepth); case DIF_VAR_CALLER: if (!dtrace_priv_kernel(state)) return (0); if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { int aframes = mstate->dtms_probe->dtpr_aframes + 2; if (!DTRACE_ANCHORED(mstate->dtms_probe)) { /* * If this is an unanchored probe, we are * required to go through the slow path: * dtrace_caller() only guarantees correct * results for anchored probes. */ pc_t caller[2] = {0, 0}; dtrace_getpcstack(caller, 2, aframes, (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); mstate->dtms_caller = caller[1]; } else if ((mstate->dtms_caller = dtrace_caller(aframes)) == -1) { /* * We have failed to do this the quick way; * we must resort to the slower approach of * calling dtrace_getpcstack().
*/ pc_t caller = 0; dtrace_getpcstack(&caller, 1, aframes, NULL); mstate->dtms_caller = caller; } mstate->dtms_present |= DTRACE_MSTATE_CALLER; } return (mstate->dtms_caller); case DIF_VAR_UCALLER: if (!dtrace_priv_proc(state)) return (0); if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { uint64_t ustack[3]; /* * dtrace_getupcstack() fills in the first uint64_t * with the current PID. The second uint64_t will * be the program counter at user-level. The third * uint64_t will contain the caller, which is what * we're after. */ ustack[2] = 0; DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_getupcstack(ustack, 3); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); mstate->dtms_ucaller = ustack[2]; mstate->dtms_present |= DTRACE_MSTATE_UCALLER; } return (mstate->dtms_ucaller); case DIF_VAR_PROBEPROV: ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); return (dtrace_dif_varstr( (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, state, mstate)); case DIF_VAR_PROBEMOD: ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); return (dtrace_dif_varstr( (uintptr_t)mstate->dtms_probe->dtpr_mod, state, mstate)); case DIF_VAR_PROBEFUNC: ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); return (dtrace_dif_varstr( (uintptr_t)mstate->dtms_probe->dtpr_func, state, mstate)); case DIF_VAR_PROBENAME: ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); return (dtrace_dif_varstr( (uintptr_t)mstate->dtms_probe->dtpr_name, state, mstate)); case DIF_VAR_PID: if (!dtrace_priv_proc(state)) return (0); #ifdef illumos /* * Note that we are assuming that an unanchored probe is * always due to a high-level interrupt. (And we're assuming * that there is only a single high level interrupt.) */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return (pid0.pid_id); /* * It is always safe to dereference one's own t_procp pointer: * it always points to a valid, allocated proc structure. * Further, it is always safe to dereference the p_pidp member * of one's own proc structure. (These are truisms because * threads and processes don't clean up their own state -- * they leave that task to whomever reaps them.) */ return ((uint64_t)curthread->t_procp->p_pidp->pid_id); #else return ((uint64_t)curproc->p_pid); #endif case DIF_VAR_PPID: if (!dtrace_priv_proc(state)) return (0); #ifdef illumos /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return (pid0.pid_id); /* * It is always safe to dereference one's own t_procp pointer: * it always points to a valid, allocated proc structure. * (This is true because threads don't clean up their own * state -- they leave that task to whomever reaps them.) */ return ((uint64_t)curthread->t_procp->p_ppid); #else if (curproc->p_pid == proc0.p_pid) return (curproc->p_pid); else return (curproc->p_pptr->p_pid); #endif case DIF_VAR_TID: #ifdef illumos /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return (0); #endif return ((uint64_t)curthread->t_tid); case DIF_VAR_EXECARGS: { struct pargs *p_args = curthread->td_proc->p_args; if (p_args == NULL) return (0); return (dtrace_dif_varstrz( (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); } case DIF_VAR_EXECNAME: #ifdef illumos if (!dtrace_priv_proc(state)) return (0); /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return ((uint64_t)(uintptr_t)p0.p_user.u_comm); /* * It is always safe to dereference one's own t_procp pointer: * it always points to a valid, allocated proc structure.
* (This is true because threads don't clean up their own * state -- they leave that task to whomever reaps them.) */ return (dtrace_dif_varstr( (uintptr_t)curthread->t_procp->p_user.u_comm, state, mstate)); #else return (dtrace_dif_varstr( (uintptr_t) curthread->td_proc->p_comm, state, mstate)); #endif case DIF_VAR_ZONENAME: #ifdef illumos if (!dtrace_priv_proc(state)) return (0); /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); /* * It is always safe to dereference one's own t_procp pointer: * it always points to a valid, allocated proc structure. * (This is true because threads don't clean up their own * state -- they leave that task to whomever reaps them.) */ return (dtrace_dif_varstr( (uintptr_t)curthread->t_procp->p_zone->zone_name, state, mstate)); #else return (0); #endif case DIF_VAR_UID: if (!dtrace_priv_proc(state)) return (0); #ifdef illumos /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return ((uint64_t)p0.p_cred->cr_uid); /* * It is always safe to dereference one's own t_procp pointer: * it always points to a valid, allocated proc structure. * (This is true because threads don't clean up their own * state -- they leave that task to whomever reaps them.) * * Additionally, it is safe to dereference one's own process * credential, since this is never NULL after process birth. */ return ((uint64_t)curthread->t_procp->p_cred->cr_uid); #else return ((uint64_t)curthread->td_ucred->cr_uid); #endif case DIF_VAR_GID: if (!dtrace_priv_proc(state)) return (0); #ifdef illumos /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return ((uint64_t)p0.p_cred->cr_gid); /* * It is always safe to dereference one's own t_procp pointer: * it always points to a valid, allocated proc structure. * (This is true because threads don't clean up their own * state -- they leave that task to whomever reaps them.) * * Additionally, it is safe to dereference one's own process * credential, since this is never NULL after process birth. */ return ((uint64_t)curthread->t_procp->p_cred->cr_gid); #else return ((uint64_t)curthread->td_ucred->cr_gid); #endif case DIF_VAR_ERRNO: { #ifdef illumos klwp_t *lwp; if (!dtrace_priv_proc(state)) return (0); /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) return (0); /* * It is always safe to dereference one's own t_lwp pointer in * the event that this pointer is non-NULL. (This is true * because threads and lwps don't clean up their own state -- * they leave that task to whomever reaps them.) */ if ((lwp = curthread->t_lwp) == NULL) return (0); return ((uint64_t)lwp->lwp_errno); #else return (curthread->td_errno); #endif } #ifndef illumos case DIF_VAR_CPU: { return curcpu; } #endif default: DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); return (0); } } typedef enum dtrace_json_state { DTRACE_JSON_REST = 1, DTRACE_JSON_OBJECT, DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, DTRACE_JSON_STRING_ESCAPE_UNICODE, DTRACE_JSON_COLON, DTRACE_JSON_COMMA, DTRACE_JSON_VALUE, DTRACE_JSON_IDENTIFIER, DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP, DTRACE_JSON_COLLECT_OBJECT } dtrace_json_state_t; /* * This function possesses just enough knowledge about JSON to extract a single * value from a JSON string and store it in the scratch buffer. It is able * to extract nested object values, and members of arrays by index. 
* * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to * be looked up as we descend into the object tree. e.g. * * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL * with nelems = 5. * * The run time of this function must be bounded above by strsize to limit the * amount of work done in probe context. As such, it is implemented as a * simple state machine, reading one character at a time using safe loads * until we find the requested element, hit a parsing error or run off the * end of the object or string. * * As there is no way for a subroutine to return an error without interrupting * clause execution, we simply return NULL in the event of a missing key or any * other error condition. Each NULL return in this function is commented with * the error condition it represents -- parsing or otherwise. * * The set of states for the state machine closely matches the JSON * specification (http://json.org/). Briefly: * * DTRACE_JSON_REST: * Skip whitespace until we find either a top-level Object, moving * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE. * * DTRACE_JSON_OBJECT: * Locate the next key String in an Object. Sets a flag to denote * the next String as a key string and moves to DTRACE_JSON_STRING. * * DTRACE_JSON_COLON: * Skip whitespace until we find the colon that separates key Strings * from their values. Once found, move to DTRACE_JSON_VALUE. * * DTRACE_JSON_VALUE: * Detects the type of the next value (String, Number, Identifier, Object * or Array) and routes to the states that process that type. Here we also * deal with the element selector list if we are requested to traverse down * into the object tree. * * DTRACE_JSON_COMMA: * Skip whitespace until we find the comma that separates key-value pairs * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays * (similarly DTRACE_JSON_VALUE). All following literal value processing * states return to this state at the end of their value, unless otherwise * noted. * * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP: * Processes a Number literal from the JSON, including any exponent * component that may be present. Numbers are returned as strings, which * may be passed to strtoll() if an integer is required. * * DTRACE_JSON_IDENTIFIER: * Processes a "true", "false" or "null" literal in the JSON. * * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE, * DTRACE_JSON_STRING_ESCAPE_UNICODE: * Processes a String literal from the JSON, whether the String denotes * a key, a value or part of a larger Object. Handles all escape sequences * present in the specification, including four-digit unicode characters, * but merely includes the escape sequence without converting it to the * actual escaped character. If the String is flagged as a key, we * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA. * * DTRACE_JSON_COLLECT_OBJECT: * This state collects an entire Object (or Array), correctly handling * embedded strings. If the full element selector list matches this nested * object, we return the Object in full as a string. If not, we use this * state to skip to the next value at this level and continue processing. * * NOTE: This function uses various macros from strtolctype.h to manipulate * digit values, etc -- these have all been checked to ensure they make * no additional function calls. 
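* * A hedged usage sketch (the JSON text and selector values here are hypothetical): given the string {"foo": {"bar": [7, 8]}} and an elemlist of "foo" NUL "bar" NUL "1" NUL with nelems = 3, the machine descends through "foo" and "bar", selects array index 1 and copies "8" into dest; a missing key at any level instead yields NULL.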
*/ static char * dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, char *dest) { dtrace_json_state_t state = DTRACE_JSON_REST; int64_t array_elem = INT64_MIN; int64_t array_pos = 0; uint8_t escape_unicount = 0; boolean_t string_is_key = B_FALSE; boolean_t collect_object = B_FALSE; boolean_t found_key = B_FALSE; boolean_t in_array = B_FALSE; uint32_t braces = 0, brackets = 0; char *elem = elemlist; char *dd = dest; uintptr_t cur; for (cur = json; cur < json + size; cur++) { char cc = dtrace_load8(cur); if (cc == '\0') return (NULL); switch (state) { case DTRACE_JSON_REST: if (isspace(cc)) break; if (cc == '{') { state = DTRACE_JSON_OBJECT; break; } if (cc == '[') { in_array = B_TRUE; array_pos = 0; array_elem = dtrace_strtoll(elem, 10, size); found_key = array_elem == 0 ? B_TRUE : B_FALSE; state = DTRACE_JSON_VALUE; break; } /* * ERROR: expected to find a top-level object or array. */ return (NULL); case DTRACE_JSON_OBJECT: if (isspace(cc)) break; if (cc == '"') { state = DTRACE_JSON_STRING; string_is_key = B_TRUE; break; } /* * ERROR: either the object did not start with a key * string, or we've run off the end of the object * without finding the requested key. */ return (NULL); case DTRACE_JSON_STRING: if (cc == '\\') { *dd++ = '\\'; state = DTRACE_JSON_STRING_ESCAPE; break; } if (cc == '"') { if (collect_object) { /* * We don't reset the dest here, as * the string is part of a larger * object being collected. */ *dd++ = cc; collect_object = B_FALSE; state = DTRACE_JSON_COLLECT_OBJECT; break; } *dd = '\0'; dd = dest; /* reset string buffer */ if (string_is_key) { if (dtrace_strncmp(dest, elem, size) == 0) found_key = B_TRUE; } else if (found_key) { if (nelems > 1) { /* * We expected an object, not * this string. */ return (NULL); } return (dest); } state = string_is_key ? DTRACE_JSON_COLON : DTRACE_JSON_COMMA; string_is_key = B_FALSE; break; } *dd++ = cc; break; case DTRACE_JSON_STRING_ESCAPE: *dd++ = cc; if (cc == 'u') { escape_unicount = 0; state = DTRACE_JSON_STRING_ESCAPE_UNICODE; } else { state = DTRACE_JSON_STRING; } break; case DTRACE_JSON_STRING_ESCAPE_UNICODE: if (!isxdigit(cc)) { /* * ERROR: invalid unicode escape, expected * four valid hexadecimal digits. */ return (NULL); } *dd++ = cc; if (++escape_unicount == 4) state = DTRACE_JSON_STRING; break; case DTRACE_JSON_COLON: if (isspace(cc)) break; if (cc == ':') { state = DTRACE_JSON_VALUE; break; } /* * ERROR: expected a colon. */ return (NULL); case DTRACE_JSON_COMMA: if (isspace(cc)) break; if (cc == ',') { if (in_array) { state = DTRACE_JSON_VALUE; if (++array_pos == array_elem) found_key = B_TRUE; } else { state = DTRACE_JSON_OBJECT; } break; } /* * ERROR: either we hit an unexpected character, or * we reached the end of the object or array without * finding the requested key. */ return (NULL); case DTRACE_JSON_IDENTIFIER: if (islower(cc)) { *dd++ = cc; break; } *dd = '\0'; dd = dest; /* reset string buffer */ if (dtrace_strncmp(dest, "true", 5) == 0 || dtrace_strncmp(dest, "false", 6) == 0 || dtrace_strncmp(dest, "null", 5) == 0) { if (found_key) { if (nelems > 1) { /* * ERROR: We expected an object, * not this identifier. */ return (NULL); } return (dest); } else { cur--; state = DTRACE_JSON_COMMA; break; } } /* * ERROR: we did not recognise the identifier as one * of those in the JSON specification.
*/ return (NULL); case DTRACE_JSON_NUMBER: if (cc == '.') { *dd++ = cc; state = DTRACE_JSON_NUMBER_FRAC; break; } if (cc == 'x' || cc == 'X') { /* * ERROR: specification explicitly excludes * hexadecimal or octal numbers. */ return (NULL); } /* FALLTHRU */ case DTRACE_JSON_NUMBER_FRAC: if (cc == 'e' || cc == 'E') { *dd++ = cc; state = DTRACE_JSON_NUMBER_EXP; break; } if (cc == '+' || cc == '-') { /* * ERROR: expect sign as part of exponent only. */ return (NULL); } /* FALLTHRU */ case DTRACE_JSON_NUMBER_EXP: if (isdigit(cc) || cc == '+' || cc == '-') { *dd++ = cc; break; } *dd = '\0'; dd = dest; /* reset string buffer */ if (found_key) { if (nelems > 1) { /* * ERROR: We expected an object, not * this number. */ return (NULL); } return (dest); } cur--; state = DTRACE_JSON_COMMA; break; case DTRACE_JSON_VALUE: if (isspace(cc)) break; if (cc == '{' || cc == '[') { if (nelems > 1 && found_key) { in_array = cc == '[' ? B_TRUE : B_FALSE; /* * If our element selector directs us * to descend into this nested object, * then move to the next selector * element in the list and restart the * state machine. */ while (*elem != '\0') elem++; elem++; /* skip the inter-element NUL */ nelems--; dd = dest; if (in_array) { state = DTRACE_JSON_VALUE; array_pos = 0; array_elem = dtrace_strtoll( elem, 10, size); found_key = array_elem == 0 ? B_TRUE : B_FALSE; } else { found_key = B_FALSE; state = DTRACE_JSON_OBJECT; } break; } /* * Otherwise, we wish to either skip this * nested object or return it in full. */ if (cc == '[') brackets = 1; else braces = 1; *dd++ = cc; state = DTRACE_JSON_COLLECT_OBJECT; break; } if (cc == '"') { state = DTRACE_JSON_STRING; break; } if (islower(cc)) { /* * Here we deal with true, false and null. */ *dd++ = cc; state = DTRACE_JSON_IDENTIFIER; break; } if (cc == '-' || isdigit(cc)) { *dd++ = cc; state = DTRACE_JSON_NUMBER; break; } /* * ERROR: unexpected character at start of value. */ return (NULL); case DTRACE_JSON_COLLECT_OBJECT: if (cc == '\0') /* * ERROR: unexpected end of input. */ return (NULL); *dd++ = cc; if (cc == '"') { collect_object = B_TRUE; state = DTRACE_JSON_STRING; break; } if (cc == ']') { if (brackets-- == 0) { /* * ERROR: unbalanced brackets. */ return (NULL); } } else if (cc == '}') { if (braces-- == 0) { /* * ERROR: unbalanced braces. */ return (NULL); } } else if (cc == '{') { braces++; } else if (cc == '[') { brackets++; } if (brackets == 0 && braces == 0) { if (found_key) { *dd = '\0'; return (dest); } dd = dest; /* reset string buffer */ state = DTRACE_JSON_COMMA; } break; } } return (NULL); } /* * Emulate the execution of DTrace ID subroutines invoked by the call opcode. * Notice that we don't bother validating the proper number of arguments or * their types in the tuple stack. This isn't needed because all argument * interpretation is safe because of our load safety -- the worst that can * happen is that a bogus program can obtain bogus results.
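* * As the case bodies below illustrate, arguments arrive in tupregs[0 .. nargs - 1] and each subroutine leaves its result in regs[rd]; strlen(s), for example, takes s from tupregs[0].dttk_value and stores the computed length in regs[rd].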
*/ static void dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, dtrace_key_t *tupregs, int nargs, dtrace_mstate_t *mstate, dtrace_state_t *state) { volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; dtrace_vstate_t *vstate = &state->dts_vstate; #ifdef illumos union { mutex_impl_t mi; uint64_t mx; } m; union { krwlock_t ri; uintptr_t rw; } r; #else struct thread *lowner; union { struct lock_object *li; uintptr_t lx; } l; #endif switch (subr) { case DIF_SUBR_RAND: regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; break; #ifdef illumos case DIF_SUBR_MUTEX_OWNED: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { regs[rd] = 0; break; } m.mx = dtrace_load64(tupregs[0].dttk_value); if (MUTEX_TYPE_ADAPTIVE(&m.mi)) regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; else regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); break; case DIF_SUBR_MUTEX_OWNER: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { regs[rd] = 0; break; } m.mx = dtrace_load64(tupregs[0].dttk_value); if (MUTEX_TYPE_ADAPTIVE(&m.mi) && MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); else regs[rd] = 0; break; case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { regs[rd] = 0; break; } m.mx = dtrace_load64(tupregs[0].dttk_value); regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); break; case DIF_SUBR_MUTEX_TYPE_SPIN: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), mstate, vstate)) { regs[rd] = 0; break; } m.mx = dtrace_load64(tupregs[0].dttk_value); regs[rd] = MUTEX_TYPE_SPIN(&m.mi); break; case DIF_SUBR_RW_READ_HELD: { uintptr_t tmp; if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), mstate, vstate)) { regs[rd] = 0; break; } r.rw = dtrace_loadptr(tupregs[0].dttk_value); regs[rd] = _RW_READ_HELD(&r.ri, tmp); break; } case DIF_SUBR_RW_WRITE_HELD: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), mstate, vstate)) { regs[rd] = 0; break; } r.rw = dtrace_loadptr(tupregs[0].dttk_value); regs[rd] = _RW_WRITE_HELD(&r.ri); break; case DIF_SUBR_RW_ISWRITER: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), mstate, vstate)) { regs[rd] = 0; break; } r.rw = dtrace_loadptr(tupregs[0].dttk_value); regs[rd] = _RW_ISWRITER(&r.ri); break; #else /* !illumos */ case DIF_SUBR_MUTEX_OWNED: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct lock_object), mstate, vstate)) { regs[rd] = 0; break; } l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_SUBR_MUTEX_OWNER: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct lock_object), mstate, vstate)) { regs[rd] = 0; break; } l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); regs[rd] = (uintptr_t)lowner; break; case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), mstate, vstate)) { regs[rd] = 0; break; } l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SLEEPLOCK) != 0; DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_SUBR_MUTEX_TYPE_SPIN: if (!dtrace_canload(tupregs[0].dttk_value, sizeof 
(struct mtx), mstate, vstate)) { regs[rd] = 0; break; } l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_SUBR_RW_READ_HELD: case DIF_SUBR_SX_SHARED_HELD: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), mstate, vstate)) { regs[rd] = 0; break; } l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && lowner == NULL; DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_SUBR_RW_WRITE_HELD: case DIF_SUBR_SX_EXCLUSIVE_HELD: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), mstate, vstate)) { regs[rd] = 0; break; } l.lx = dtrace_loadptr(tupregs[0].dttk_value); DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && lowner != NULL; DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_SUBR_RW_ISWRITER: case DIF_SUBR_SX_ISEXCLUSIVE: if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), mstate, vstate)) { regs[rd] = 0; break; } l.lx = dtrace_loadptr(tupregs[0].dttk_value); DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); regs[rd] = (lowner == curthread); break; #endif /* illumos */ case DIF_SUBR_BCOPY: { /* * We need to be sure that the destination is in the scratch * region -- no other region is allowed. */ uintptr_t src = tupregs[0].dttk_value; uintptr_t dest = tupregs[1].dttk_value; size_t size = tupregs[2].dttk_value; if (!dtrace_inscratch(dest, size, mstate)) { *flags |= CPU_DTRACE_BADADDR; *illval = regs[rd]; break; } if (!dtrace_canload(src, size, mstate, vstate)) { regs[rd] = 0; break; } dtrace_bcopy((void *)src, (void *)dest, size); break; } case DIF_SUBR_ALLOCA: case DIF_SUBR_COPYIN: { uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); uint64_t size = tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; /* * This action doesn't require any credential checks since * probes will not activate in user contexts to which the * enabling user does not have permissions. */ /* * Rounding up the user allocation size could have overflowed * a large, bogus allocation (like -1ULL) to 0. */ if (scratch_size < size || !DTRACE_INSCRATCH(mstate, scratch_size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } if (subr == DIF_SUBR_COPYIN) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); } mstate->dtms_scratch_ptr += scratch_size; regs[rd] = dest; break; } case DIF_SUBR_COPYINTO: { uint64_t size = tupregs[1].dttk_value; uintptr_t dest = tupregs[2].dttk_value; /* * This action doesn't require any credential checks since * probes will not activate in user contexts to which the * enabling user does not have permissions. 
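* (Unlike copyin() above, which allocates its own scratch destination, copyinto() writes to a caller-supplied address -- hence the dtrace_inscratch() check that follows.)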
*/ if (!dtrace_inscratch(dest, size, mstate)) { *flags |= CPU_DTRACE_BADADDR; *illval = regs[rd]; break; } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; } case DIF_SUBR_COPYINSTR: { uintptr_t dest = mstate->dtms_scratch_ptr; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; if (nargs > 1 && tupregs[1].dttk_value < size) size = tupregs[1].dttk_value + 1; /* * This action doesn't require any credential checks since * probes will not activate in user contexts to which the * enabling user does not have permissions. */ if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); ((char *)dest)[size - 1] = '\0'; mstate->dtms_scratch_ptr += size; regs[rd] = dest; break; } #ifdef illumos case DIF_SUBR_MSGSIZE: case DIF_SUBR_MSGDSIZE: { uintptr_t baddr = tupregs[0].dttk_value, daddr; uintptr_t wptr, rptr; size_t count = 0; int cont = 0; while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, vstate)) { regs[rd] = 0; break; } wptr = dtrace_loadptr(baddr + offsetof(mblk_t, b_wptr)); rptr = dtrace_loadptr(baddr + offsetof(mblk_t, b_rptr)); if (wptr < rptr) { *flags |= CPU_DTRACE_BADADDR; *illval = tupregs[0].dttk_value; break; } daddr = dtrace_loadptr(baddr + offsetof(mblk_t, b_datap)); baddr = dtrace_loadptr(baddr + offsetof(mblk_t, b_cont)); /* * We want to protect against denial-of-service here, * so we're only going to search the list for * dtrace_msgdsize_max mblks. */ if (cont++ > dtrace_msgdsize_max) { *flags |= CPU_DTRACE_ILLOP; break; } if (subr == DIF_SUBR_MSGDSIZE) { if (dtrace_load8(daddr + offsetof(dblk_t, db_type)) != M_DATA) continue; } count += wptr - rptr; } if (!(*flags & CPU_DTRACE_FAULT)) regs[rd] = count; break; } #endif case DIF_SUBR_PROGENYOF: { pid_t pid = tupregs[0].dttk_value; proc_t *p; int rval = 0; DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); for (p = curthread->t_procp; p != NULL; p = p->p_parent) { #ifdef illumos if (p->p_pidp->pid_id == pid) { #else if (p->p_pid == pid) { #endif rval = 1; break; } } DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); regs[rd] = rval; break; } case DIF_SUBR_SPECULATION: regs[rd] = dtrace_speculation(state); break; case DIF_SUBR_COPYOUT: { uintptr_t kaddr = tupregs[0].dttk_value; uintptr_t uaddr = tupregs[1].dttk_value; uint64_t size = tupregs[2].dttk_value; if (!dtrace_destructive_disallow && dtrace_priv_proc_control(state) && !dtrace_istoxic(kaddr, size) && dtrace_canload(kaddr, size, mstate, vstate)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_copyout(kaddr, uaddr, size, flags); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); } break; } case DIF_SUBR_COPYOUTSTR: { uintptr_t kaddr = tupregs[0].dttk_value; uintptr_t uaddr = tupregs[1].dttk_value; uint64_t size = tupregs[2].dttk_value; size_t lim; if (!dtrace_destructive_disallow && dtrace_priv_proc_control(state) && !dtrace_istoxic(kaddr, size) && dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_copyoutstr(kaddr, uaddr, lim, flags); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); } break; } case DIF_SUBR_STRLEN: { size_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; size_t lim; if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { regs[rd] = 0; break; } regs[rd] =
dtrace_strlen((char *)addr, lim); break; } case DIF_SUBR_STRCHR: case DIF_SUBR_STRRCHR: { /* * We're going to iterate over the string looking for the * specified character. We will iterate until we have reached * the string length or we have found the character. If this * is DIF_SUBR_STRRCHR, we will look for the last occurrence * of the specified character instead of the first. */ uintptr_t addr = tupregs[0].dttk_value; uintptr_t addr_limit; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; size_t lim; char c, target = (char)tupregs[1].dttk_value; if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) { regs[rd] = 0; break; } addr_limit = addr + lim; for (regs[rd] = 0; addr < addr_limit; addr++) { if ((c = dtrace_load8(addr)) == target) { regs[rd] = addr; if (subr == DIF_SUBR_STRCHR) break; } if (c == '\0') break; } break; } case DIF_SUBR_STRSTR: case DIF_SUBR_INDEX: case DIF_SUBR_RINDEX: { /* * We're going to iterate over the string looking for the * specified string. We will iterate until we have reached * the string length or we have found the string. (Yes, this * is done in the most naive way possible -- but considering * that the string we're searching for is likely to be * relatively short, the complexity of Rabin-Karp or similar * hardly seems merited.) */ char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; size_t len = dtrace_strlen(addr, size); size_t sublen = dtrace_strlen(substr, size); char *limit = addr + len, *orig = addr; int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; int inc = 1; regs[rd] = notfound; if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { regs[rd] = 0; break; } if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, vstate)) { regs[rd] = 0; break; } /* * strstr() and index()/rindex() have similar semantics if * both strings are the empty string: strstr() returns a * pointer to the (empty) string, and index() and rindex() * both return index 0 (regardless of any position argument). */ if (sublen == 0 && len == 0) { if (subr == DIF_SUBR_STRSTR) regs[rd] = (uintptr_t)addr; else regs[rd] = 0; break; } if (subr != DIF_SUBR_STRSTR) { if (subr == DIF_SUBR_RINDEX) { limit = orig - 1; addr += len; inc = -1; } /* * Both index() and rindex() take an optional position * argument that denotes the starting position. */ if (nargs == 3) { int64_t pos = (int64_t)tupregs[2].dttk_value; /* * If the position argument to index() is * negative, Perl implicitly clamps it at * zero. This semantic is a little surprising * given the special meaning of negative * positions to similar Perl functions like * substr(), but it appears to reflect a * notion that index() can start from a * negative index and increment its way up to * the string. Given this notion, Perl's * rindex() is at least self-consistent in * that it implicitly clamps positions greater * than the string length to be the string * length. Where Perl completely loses * coherence, however, is when the specified * substring is the empty string (""). In * this case, even if the position is * negative, rindex() returns 0 -- and even if * the position is greater than the length, * index() returns the string length. These * semantics violate the notion that index() * should never return a value less than the * specified position and that rindex() should * never return a value greater than the * specified position. 
(One assumes that * these semantics are artifacts of Perl's * implementation and not the results of * deliberate design -- it beggars belief that * even Larry Wall could desire such oddness.) * While in the abstract one would wish for * consistent position semantics across * substr(), index() and rindex() -- or at the * very least self-consistent position * semantics for index() and rindex() -- we * instead opt to keep with the extant Perl * semantics, in all their broken glory. (Do * we have more desire to maintain Perl's * semantics than Perl does? Probably.) */ if (subr == DIF_SUBR_RINDEX) { if (pos < 0) { if (sublen == 0) regs[rd] = 0; break; } if (pos > len) pos = len; } else { if (pos < 0) pos = 0; if (pos >= len) { if (sublen == 0) regs[rd] = len; break; } } addr = orig + pos; } } for (regs[rd] = notfound; addr != limit; addr += inc) { if (dtrace_strncmp(addr, substr, sublen) == 0) { if (subr != DIF_SUBR_STRSTR) { /* * As D index() and rindex() are * modeled on Perl (and not on awk), * we return a zero-based (and not a * one-based) index. (For you Perl * weenies: no, we're not going to add * $[ -- and shouldn't you be at a con * or something?) */ regs[rd] = (uintptr_t)(addr - orig); break; } ASSERT(subr == DIF_SUBR_STRSTR); regs[rd] = (uintptr_t)addr; break; } } break; } case DIF_SUBR_STRTOK: { uintptr_t addr = tupregs[0].dttk_value; uintptr_t tokaddr = tupregs[1].dttk_value; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t limit, toklimit; size_t clim; uint8_t c = 0, tokmap[32]; /* 256 / 8 */ char *dest = (char *)mstate->dtms_scratch_ptr; int i; /* * Check both the token buffer and (later) the input buffer, * since both could be non-scratch addresses. */ if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) { regs[rd] = 0; break; } toklimit = tokaddr + clim; if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } if (addr == 0) { /* * If the address specified is NULL, we use our saved * strtok pointer from the mstate. Note that this * means that the saved strtok pointer is _only_ * valid within multiple enablings of the same probe -- * it behaves like an implicit clause-local variable. */ addr = mstate->dtms_strtok; limit = mstate->dtms_strtok_limit; } else { /* * If the user-specified address is non-NULL we must * access check it. This is the only time we have * a chance to do so, since this address may reside * in the string table of this clause -- future calls * (when we fetch addr from mstate->dtms_strtok) * would fail this access check. */ if (!dtrace_strcanload(addr, size, &clim, mstate, vstate)) { regs[rd] = 0; break; } limit = addr + clim; } /* * First, zero the token map, and then process the token * string -- setting a bit in the map for every character * found in the token string. */ for (i = 0; i < sizeof (tokmap); i++) tokmap[i] = 0; for (; tokaddr < toklimit; tokaddr++) { if ((c = dtrace_load8(tokaddr)) == '\0') break; ASSERT((c >> 3) < sizeof (tokmap)); tokmap[c >> 3] |= (1 << (c & 0x7)); } for (; addr < limit; addr++) { /* * We're looking for a character that is _not_ * contained in the token string. */ if ((c = dtrace_load8(addr)) == '\0') break; if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) break; } if (c == '\0') { /* * We reached the end of the string without finding * any character that was not in the token string. * We return NULL in this case, and we set the saved * address to NULL as well.
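* * (An illustrative sequence, not from the original comment: strtok("a,,b", ",") returns "a"; a following strtok(NULL, ",") skips the run of delimiters and returns "b"; the next call returns NULL and clears the saved pointer.)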
*/ regs[rd] = 0; mstate->dtms_strtok = 0; mstate->dtms_strtok_limit = 0; break; } /* * From here on, we're copying into the destination string. */ for (i = 0; addr < limit && i < size - 1; addr++) { if ((c = dtrace_load8(addr)) == '\0') break; if (tokmap[c >> 3] & (1 << (c & 0x7))) break; ASSERT(i < size); dest[i++] = c; } ASSERT(i < size); dest[i] = '\0'; regs[rd] = (uintptr_t)dest; mstate->dtms_scratch_ptr += size; mstate->dtms_strtok = addr; mstate->dtms_strtok_limit = limit; break; } case DIF_SUBR_SUBSTR: { uintptr_t s = tupregs[0].dttk_value; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; char *d = (char *)mstate->dtms_scratch_ptr; int64_t index = (int64_t)tupregs[1].dttk_value; int64_t remaining = (int64_t)tupregs[2].dttk_value; size_t len = dtrace_strlen((char *)s, size); int64_t i; if (!dtrace_canload(s, len + 1, mstate, vstate)) { regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } if (nargs <= 2) remaining = (int64_t)size; if (index < 0) { index += len; if (index < 0 && index + remaining > 0) { remaining += index; index = 0; } } if (index >= len || index < 0) { remaining = 0; } else if (remaining < 0) { remaining += len - index; } else if (index + remaining > size) { remaining = size - index; } for (i = 0; i < remaining; i++) { if ((d[i] = dtrace_load8(s + index + i)) == '\0') break; } d[i] = '\0'; mstate->dtms_scratch_ptr += size; regs[rd] = (uintptr_t)d; break; } case DIF_SUBR_JSON: { uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t json = tupregs[0].dttk_value; size_t jsonlen = dtrace_strlen((char *)json, size); uintptr_t elem = tupregs[1].dttk_value; size_t elemlen = dtrace_strlen((char *)elem, size); char *dest = (char *)mstate->dtms_scratch_ptr; char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1; char *ee = elemlist; int nelems = 1; uintptr_t cur; if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) || !dtrace_canload(elem, elemlen + 1, mstate, vstate)) { regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } /* * Read the element selector and split it up into a packed list * of strings. */ for (cur = elem; cur < elem + elemlen; cur++) { char cc = dtrace_load8(cur); if (cur == elem && cc == '[') { /* * If the first element selector key is * actually an array index then ignore the * bracket. */ continue; } if (cc == ']') continue; if (cc == '.' 
|| cc == '[') { nelems++; cc = '\0'; } *ee++ = cc; } *ee++ = '\0'; if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist, nelems, dest)) != 0) mstate->dtms_scratch_ptr += jsonlen + 1; break; } case DIF_SUBR_TOUPPER: case DIF_SUBR_TOLOWER: { uintptr_t s = tupregs[0].dttk_value; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; char *dest = (char *)mstate->dtms_scratch_ptr, c; size_t len = dtrace_strlen((char *)s, size); char lower, upper, convert; int64_t i; if (subr == DIF_SUBR_TOUPPER) { lower = 'a'; upper = 'z'; convert = 'A'; } else { lower = 'A'; upper = 'Z'; convert = 'a'; } if (!dtrace_canload(s, len + 1, mstate, vstate)) { regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } for (i = 0; i < size - 1; i++) { if ((c = dtrace_load8(s + i)) == '\0') break; if (c >= lower && c <= upper) c = convert + (c - lower); dest[i] = c; } ASSERT(i < size); dest[i] = '\0'; regs[rd] = (uintptr_t)dest; mstate->dtms_scratch_ptr += size; break; } #ifdef illumos case DIF_SUBR_GETMAJOR: #ifdef _LP64 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; #else regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; #endif break; case DIF_SUBR_GETMINOR: #ifdef _LP64 regs[rd] = tupregs[0].dttk_value & MAXMIN64; #else regs[rd] = tupregs[0].dttk_value & MAXMIN; #endif break; case DIF_SUBR_DDI_PATHNAME: { /* * This one is a galactic mess. We are going to roughly * emulate ddi_pathname(), but it's made more complicated * by the fact that we (a) want to include the minor name and * (b) must proceed iteratively instead of recursively. */ uintptr_t dest = mstate->dtms_scratch_ptr; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; char *start = (char *)dest, *end = start + size - 1; uintptr_t daddr = tupregs[0].dttk_value; int64_t minor = (int64_t)tupregs[1].dttk_value; char *s; int i, len, depth = 0; /* * Due to all the pointer jumping we do and context we must * rely upon, we just mandate that the user must have kernel * read privileges to use this routine. */ if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { *flags |= CPU_DTRACE_KPRIV; *illval = daddr; regs[rd] = 0; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } *end = '\0'; /* * We want to have a name for the minor. In order to do this, * we need to walk the minor list from the devinfo. We want * to be sure that we don't infinitely walk a circular list, * so we check for circularity by sending a scout pointer * ahead two elements for every element that we iterate over; * if the list is circular, these will ultimately point to the * same element. You may recognize this little trick as the * answer to a stupid interview question -- one that always * seems to be asked by those who had to have it laboriously * explained to them, and who can't even concisely describe * the conditions under which one would be forced to resort to * this technique. Needless to say, those conditions are * found here -- and probably only here. Is this the only use * of this infamous trick in shipping, production code? If it * isn't, it probably should be... 
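* (Concretely: the code below advances "scout" two links for every one that * "maddr" advances, so on a circular minor list the two pointers must * eventually collide; when they do, we set CPU_DTRACE_ILLOP and bail rather * than spin forever in probe context.)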
*/ if (minor != -1) { uintptr_t maddr = dtrace_loadptr(daddr + offsetof(struct dev_info, devi_minor)); uintptr_t next = offsetof(struct ddi_minor_data, next); uintptr_t name = offsetof(struct ddi_minor_data, d_minor) + offsetof(struct ddi_minor, name); uintptr_t dev = offsetof(struct ddi_minor_data, d_minor) + offsetof(struct ddi_minor, dev); uintptr_t scout; if (maddr != NULL) scout = dtrace_loadptr(maddr + next); while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { uint64_t m; #ifdef _LP64 m = dtrace_load64(maddr + dev) & MAXMIN64; #else m = dtrace_load32(maddr + dev) & MAXMIN; #endif if (m != minor) { maddr = dtrace_loadptr(maddr + next); if (scout == NULL) continue; scout = dtrace_loadptr(scout + next); if (scout == NULL) continue; scout = dtrace_loadptr(scout + next); if (scout == NULL) continue; if (scout == maddr) { *flags |= CPU_DTRACE_ILLOP; break; } continue; } /* * We have the minor data. Now we need to * copy the minor's name into the end of the * pathname. */ s = (char *)dtrace_loadptr(maddr + name); len = dtrace_strlen(s, size); if (*flags & CPU_DTRACE_FAULT) break; if (len != 0) { if ((end -= (len + 1)) < start) break; *end = ':'; } for (i = 1; i <= len; i++) end[i] = dtrace_load8((uintptr_t)s++); break; } } while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { ddi_node_state_t devi_state; devi_state = dtrace_load32(daddr + offsetof(struct dev_info, devi_node_state)); if (*flags & CPU_DTRACE_FAULT) break; if (devi_state >= DS_INITIALIZED) { s = (char *)dtrace_loadptr(daddr + offsetof(struct dev_info, devi_addr)); len = dtrace_strlen(s, size); if (*flags & CPU_DTRACE_FAULT) break; if (len != 0) { if ((end -= (len + 1)) < start) break; *end = '@'; } for (i = 1; i <= len; i++) end[i] = dtrace_load8((uintptr_t)s++); } /* * Now for the node name... */ s = (char *)dtrace_loadptr(daddr + offsetof(struct dev_info, devi_node_name)); daddr = dtrace_loadptr(daddr + offsetof(struct dev_info, devi_parent)); /* * If our parent is NULL (that is, if we're the root * node), we're going to use the special path * "devices". */ if (daddr == 0) s = "devices"; len = dtrace_strlen(s, size); if (*flags & CPU_DTRACE_FAULT) break; if ((end -= (len + 1)) < start) break; for (i = 1; i <= len; i++) end[i] = dtrace_load8((uintptr_t)s++); *end = '/'; if (depth++ > dtrace_devdepth_max) { *flags |= CPU_DTRACE_ILLOP; break; } } if (end < start) DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); if (daddr == 0) { regs[rd] = (uintptr_t)end; mstate->dtms_scratch_ptr += size; } break; } #endif case DIF_SUBR_STRJOIN: { char *d = (char *)mstate->dtms_scratch_ptr; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t s1 = tupregs[0].dttk_value; uintptr_t s2 = tupregs[1].dttk_value; int i = 0, j = 0; size_t lim1, lim2; char c; if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) || !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) { regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } for (;;) { if (i >= size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } c = (i >= lim1) ? '\0' : dtrace_load8(s1++); if ((d[i++] = c) == '\0') { i--; break; } } for (;;) { if (i >= size) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } c = (j++ >= lim2) ? 
'\0' : dtrace_load8(s2++); if ((d[i++] = c) == '\0') break; } if (i < size) { mstate->dtms_scratch_ptr += i; regs[rd] = (uintptr_t)d; } break; } case DIF_SUBR_STRTOLL: { uintptr_t s = tupregs[0].dttk_value; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; size_t lim; int base = 10; if (nargs > 1) { if ((base = tupregs[1].dttk_value) <= 1 || base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { *flags |= CPU_DTRACE_ILLOP; break; } } if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) { regs[rd] = INT64_MIN; break; } regs[rd] = dtrace_strtoll((char *)s, base, lim); break; } case DIF_SUBR_LLTOSTR: { int64_t i = (int64_t)tupregs[0].dttk_value; uint64_t val, digit; uint64_t size = 65; /* enough room for 2^64 in binary */ char *end = (char *)mstate->dtms_scratch_ptr + size - 1; int base = 10; if (nargs > 1) { if ((base = tupregs[1].dttk_value) <= 1 || base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { *flags |= CPU_DTRACE_ILLOP; break; } } val = (base == 10 && i < 0) ? i * -1 : i; if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } for (*end-- = '\0'; val; val /= base) { if ((digit = val % base) <= '9' - '0') { *end-- = '0' + digit; } else { *end-- = 'a' + (digit - ('9' - '0') - 1); } } if (i == 0 && base == 16) *end-- = '0'; if (base == 16) *end-- = 'x'; if (i == 0 || base == 8 || base == 16) *end-- = '0'; if (i < 0 && base == 10) *end-- = '-'; regs[rd] = (uintptr_t)end + 1; mstate->dtms_scratch_ptr += size; break; } case DIF_SUBR_HTONS: case DIF_SUBR_NTOHS: #if BYTE_ORDER == BIG_ENDIAN regs[rd] = (uint16_t)tupregs[0].dttk_value; #else regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); #endif break; case DIF_SUBR_HTONL: case DIF_SUBR_NTOHL: #if BYTE_ORDER == BIG_ENDIAN regs[rd] = (uint32_t)tupregs[0].dttk_value; #else regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); #endif break; case DIF_SUBR_HTONLL: case DIF_SUBR_NTOHLL: #if BYTE_ORDER == BIG_ENDIAN regs[rd] = (uint64_t)tupregs[0].dttk_value; #else regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); #endif break; case DIF_SUBR_DIRNAME: case DIF_SUBR_BASENAME: { char *dest = (char *)mstate->dtms_scratch_ptr; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t src = tupregs[0].dttk_value; int i, j, len = dtrace_strlen((char *)src, size); int lastbase = -1, firstbase = -1, lastdir = -1; int start, end; if (!dtrace_canload(src, len + 1, mstate, vstate)) { regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } /* * The basename and dirname for a zero-length string is * defined to be "." */ if (len == 0) { len = 1; src = (uintptr_t)"."; } /* * Start from the back of the string, moving back toward the * front until we see a character that isn't a slash. That * character is the last character in the basename. */ for (i = len - 1; i >= 0; i--) { if (dtrace_load8(src + i) != '/') break; } if (i >= 0) lastbase = i; /* * Starting from the last character in the basename, move * towards the front until we find a slash. The character * that we processed immediately before that is the first * character in the basename. */ for (; i >= 0; i--) { if (dtrace_load8(src + i) == '/') break; } if (i >= 0) firstbase = i + 1; /* * Now keep going until we find a non-slash character. That * character is the last character in the dirname. 
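* For example, given "/foo/bar//" (len 10), the first scan stops at 'r' * (lastbase = 7), the second at the slash before "bar" (firstbase = 5), and * the third at the final 'o' of "foo" (lastdir = 3): basename() thus yields * "bar" and dirname() yields "/foo".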
*/ for (; i >= 0; i--) { if (dtrace_load8(src + i) != '/') break; } if (i >= 0) lastdir = i; ASSERT(!(lastbase == -1 && firstbase != -1)); ASSERT(!(firstbase == -1 && lastdir != -1)); if (lastbase == -1) { /* * We didn't find a non-slash character. We know that * the length is non-zero, so the whole string must be * slashes. In either the dirname or the basename * case, we return '/'. */ ASSERT(firstbase == -1); firstbase = lastbase = lastdir = 0; } if (firstbase == -1) { /* * The entire string consists only of a basename * component. If we're looking for dirname, we need * to change our string to be just "."; if we're * looking for a basename, we'll just set the first * character of the basename to be 0. */ if (subr == DIF_SUBR_DIRNAME) { ASSERT(lastdir == -1); src = (uintptr_t)"."; lastdir = 0; } else { firstbase = 0; } } if (subr == DIF_SUBR_DIRNAME) { if (lastdir == -1) { /* * We know that we have a slash in the name -- * or lastdir would be set to 0, above. And * because lastdir is -1, we know that this * slash must be the first character. (That * is, the full string must be of the form * "/basename".) In this case, the last * character of the directory name is 0. */ lastdir = 0; } start = 0; end = lastdir; } else { ASSERT(subr == DIF_SUBR_BASENAME); ASSERT(firstbase != -1 && lastbase != -1); start = firstbase; end = lastbase; } for (i = start, j = 0; i <= end && j < size - 1; i++, j++) dest[j] = dtrace_load8(src + i); dest[j] = '\0'; regs[rd] = (uintptr_t)dest; mstate->dtms_scratch_ptr += size; break; } case DIF_SUBR_GETF: { uintptr_t fd = tupregs[0].dttk_value; struct filedesc *fdp; file_t *fp; if (!dtrace_priv_proc(state)) { regs[rd] = 0; break; } fdp = curproc->p_fd; FILEDESC_SLOCK(fdp); fp = fget_locked(fdp, fd); mstate->dtms_getf = fp; regs[rd] = (uintptr_t)fp; FILEDESC_SUNLOCK(fdp); break; } case DIF_SUBR_CLEANPATH: { char *dest = (char *)mstate->dtms_scratch_ptr, c; uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t src = tupregs[0].dttk_value; size_t lim; int i = 0, j = 0; #ifdef illumos zone_t *z; #endif if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) { regs[rd] = 0; break; } if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } /* * Move forward, loading each character. */ do { c = (i >= lim) ? '\0' : dtrace_load8(src + i++); next: if (j + 5 >= size) /* 5 = strlen("/..c\0") */ break; if (c != '/') { dest[j++] = c; continue; } c = (i >= lim) ? '\0' : dtrace_load8(src + i++); if (c == '/') { /* * We have two slashes -- we can just advance * to the next character. */ goto next; } if (c != '.') { /* * This is not "." and it's not ".." -- we can * just store the "/" and this character and * drive on. */ dest[j++] = '/'; dest[j++] = c; continue; } c = (i >= lim) ? '\0' : dtrace_load8(src + i++); if (c == '/') { /* * This is a "/./" component. We're not going * to store anything in the destination buffer; * we're just going to go to the next component. */ goto next; } if (c != '.') { /* * This is not ".." -- we can just store the * "/." and this character and continue * processing. */ dest[j++] = '/'; dest[j++] = '.'; dest[j++] = c; continue; } c = (i >= lim) ? '\0' : dtrace_load8(src + i++); if (c != '/' && c != '\0') { /* * This is not ".." -- it's "..[mumble]". * We'll store the "/.." and this character * and continue processing. */ dest[j++] = '/'; dest[j++] = '.'; dest[j++] = '.'; dest[j++] = c; continue; } /* * This is "/../" or "/..\0". We need to back up * our destination pointer until we find a "/". 
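* The result is a purely lexical cleanup -- no symlinks are resolved -- so * one would expect, for instance, cleanpath("/foo//bar/./baz/../qux") to * yield "/foo/bar/qux".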
*/ i--; while (j != 0 && dest[--j] != '/') continue; if (c == '\0') dest[++j] = '/'; } while (c != '\0'); dest[j] = '\0'; #ifdef illumos if (mstate->dtms_getf != NULL && !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) && (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) { /* * If we've done a getf() as a part of this ECB and we * don't have kernel access (and we're not in the global * zone), check if the path we cleaned up begins with * the zone's root path, and trim it off if so. Note * that this is an output cleanliness issue, not a * security issue: knowing one's zone root path does * not enable privilege escalation. */ if (strstr(dest, z->zone_rootpath) == dest) dest += strlen(z->zone_rootpath) - 1; } #endif regs[rd] = (uintptr_t)dest; mstate->dtms_scratch_ptr += size; break; } case DIF_SUBR_INET_NTOA: case DIF_SUBR_INET_NTOA6: case DIF_SUBR_INET_NTOP: { size_t size; int af, argi, i; char *base, *end; if (subr == DIF_SUBR_INET_NTOP) { af = (int)tupregs[0].dttk_value; argi = 1; } else { af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; argi = 0; } if (af == AF_INET) { ipaddr_t ip4; uint8_t *ptr8, val; if (!dtrace_canload(tupregs[argi].dttk_value, sizeof (ipaddr_t), mstate, vstate)) { regs[rd] = 0; break; } /* * Safely load the IPv4 address. */ ip4 = dtrace_load32(tupregs[argi].dttk_value); /* * Check an IPv4 string will fit in scratch. */ size = INET_ADDRSTRLEN; if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } base = (char *)mstate->dtms_scratch_ptr; end = (char *)mstate->dtms_scratch_ptr + size - 1; /* * Stringify as a dotted decimal quad. */ *end-- = '\0'; ptr8 = (uint8_t *)&ip4; for (i = 3; i >= 0; i--) { val = ptr8[i]; if (val == 0) { *end-- = '0'; } else { for (; val; val /= 10) { *end-- = '0' + (val % 10); } } if (i > 0) *end-- = '.'; } ASSERT(end + 1 >= base); } else if (af == AF_INET6) { struct in6_addr ip6; int firstzero, tryzero, numzero, v6end; uint16_t val; const char digits[] = "0123456789abcdef"; /* * Stringify using RFC 1884 convention 2 - 16 bit * hexadecimal values with a zero-run compression. * Lower case hexadecimal digits are used. * eg, fe80::214:4fff:fe0b:76c8. * The IPv4 embedded form is returned for inet_ntop, * just the IPv4 string is returned for inet_ntoa6. */ if (!dtrace_canload(tupregs[argi].dttk_value, sizeof (struct in6_addr), mstate, vstate)) { regs[rd] = 0; break; } /* * Safely load the IPv6 address. */ dtrace_bcopy( (void *)(uintptr_t)tupregs[argi].dttk_value, (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); /* * Check an IPv6 string will fit in scratch. */ size = INET6_ADDRSTRLEN; if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } base = (char *)mstate->dtms_scratch_ptr; end = (char *)mstate->dtms_scratch_ptr + size - 1; *end-- = '\0'; /* * Find the longest run of 16 bit zero values * for the single allowed zero compression - "::". 
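* For example, 2001:db8:0:0:0:0:2:1 is rendered as "2001:db8::2:1" (the * longest run of zero groups is the one compressed), and ::1 compresses * everything but the final group.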
*/ firstzero = -1; tryzero = -1; numzero = 1; for (i = 0; i < sizeof (struct in6_addr); i++) { #ifdef illumos if (ip6._S6_un._S6_u8[i] == 0 && #else if (ip6.__u6_addr.__u6_addr8[i] == 0 && #endif tryzero == -1 && i % 2 == 0) { tryzero = i; continue; } if (tryzero != -1 && #ifdef illumos (ip6._S6_un._S6_u8[i] != 0 || #else (ip6.__u6_addr.__u6_addr8[i] != 0 || #endif i == sizeof (struct in6_addr) - 1)) { if (i - tryzero <= numzero) { tryzero = -1; continue; } firstzero = tryzero; numzero = i - i % 2 - tryzero; tryzero = -1; #ifdef illumos if (ip6._S6_un._S6_u8[i] == 0 && #else if (ip6.__u6_addr.__u6_addr8[i] == 0 && #endif i == sizeof (struct in6_addr) - 1) numzero += 2; } } ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); /* * Check for an IPv4 embedded address. */ v6end = sizeof (struct in6_addr) - 2; if (IN6_IS_ADDR_V4MAPPED(&ip6) || IN6_IS_ADDR_V4COMPAT(&ip6)) { for (i = sizeof (struct in6_addr) - 1; i >= DTRACE_V4MAPPED_OFFSET; i--) { ASSERT(end >= base); #ifdef illumos val = ip6._S6_un._S6_u8[i]; #else val = ip6.__u6_addr.__u6_addr8[i]; #endif if (val == 0) { *end-- = '0'; } else { for (; val; val /= 10) { *end-- = '0' + val % 10; } } if (i > DTRACE_V4MAPPED_OFFSET) *end-- = '.'; } if (subr == DIF_SUBR_INET_NTOA6) goto inetout; /* * Set v6end to skip the IPv4 address that * we have already stringified. */ v6end = 10; } /* * Build the IPv6 string by working through the * address in reverse. */ for (i = v6end; i >= 0; i -= 2) { ASSERT(end >= base); if (i == firstzero + numzero - 2) { *end-- = ':'; *end-- = ':'; i -= numzero - 2; continue; } if (i < 14 && i != firstzero - 2) *end-- = ':'; #ifdef illumos val = (ip6._S6_un._S6_u8[i] << 8) + ip6._S6_un._S6_u8[i + 1]; #else val = (ip6.__u6_addr.__u6_addr8[i] << 8) + ip6.__u6_addr.__u6_addr8[i + 1]; #endif if (val == 0) { *end-- = '0'; } else { for (; val; val /= 16) { *end-- = digits[val % 16]; } } } ASSERT(end + 1 >= base); } else { /* * The user didn't use AF_INET or AF_INET6. */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); regs[rd] = 0; break; } inetout: regs[rd] = (uintptr_t)end + 1; mstate->dtms_scratch_ptr += size; break; } case DIF_SUBR_MEMREF: { uintptr_t size = 2 * sizeof(uintptr_t); uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; /* address and length */ memref[0] = tupregs[0].dttk_value; memref[1] = tupregs[1].dttk_value; regs[rd] = (uintptr_t) memref; mstate->dtms_scratch_ptr += scratch_size; break; } #ifndef illumos case DIF_SUBR_MEMSTR: { char *str = (char *)mstate->dtms_scratch_ptr; uintptr_t mem = tupregs[0].dttk_value; char c = tupregs[1].dttk_value; size_t size = tupregs[2].dttk_value; uint8_t n; int i; regs[rd] = 0; if (size == 0) break; if (!dtrace_canload(mem, size - 1, mstate, vstate)) break; if (!DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); break; } if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) { *flags |= CPU_DTRACE_ILLOP; break; } for (i = 0; i < size - 1; i++) { n = dtrace_load8(mem++); str[i] = (n == 0) ? c : n; } str[size - 1] = 0; regs[rd] = (uintptr_t)str; mstate->dtms_scratch_ptr += size; break; } #endif } } /* * Emulate the execution of DTrace IR instructions specified by the given * DIF object. This function is deliberately void of assertions as all of * the necessary checks are handled by a call to dtrace_difo_validate(). 
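* (As a rough illustration: a D expression such as arg0 + 1 reaches this * point as a short DIF sequence -- a load of the arg0 variable, a DIF_OP_SETX * of the constant 1 from the integer table, a DIF_OP_ADD and a DIF_OP_RET -- * each of which is dispatched by the switch below.)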
*/ static uint64_t dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, dtrace_state_t *state) { const dif_instr_t *text = difo->dtdo_buf; const uint_t textlen = difo->dtdo_len; const char *strtab = difo->dtdo_strtab; const uint64_t *inttab = difo->dtdo_inttab; uint64_t rval = 0; dtrace_statvar_t *svar; dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; dtrace_difv_t *v; volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ uint64_t regs[DIF_DIR_NREGS]; uint64_t *tmp; uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; int64_t cc_r; uint_t pc = 0, id, opc = 0; uint8_t ttop = 0; dif_instr_t instr; uint_t r1, r2, rd; /* * We stash the current DIF object into the machine state: we need it * for subsequent access checking. */ mstate->dtms_difo = difo; regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { opc = pc; instr = text[pc++]; r1 = DIF_INSTR_R1(instr); r2 = DIF_INSTR_R2(instr); rd = DIF_INSTR_RD(instr); switch (DIF_INSTR_OP(instr)) { case DIF_OP_OR: regs[rd] = regs[r1] | regs[r2]; break; case DIF_OP_XOR: regs[rd] = regs[r1] ^ regs[r2]; break; case DIF_OP_AND: regs[rd] = regs[r1] & regs[r2]; break; case DIF_OP_SLL: regs[rd] = regs[r1] << regs[r2]; break; case DIF_OP_SRL: regs[rd] = regs[r1] >> regs[r2]; break; case DIF_OP_SUB: regs[rd] = regs[r1] - regs[r2]; break; case DIF_OP_ADD: regs[rd] = regs[r1] + regs[r2]; break; case DIF_OP_MUL: regs[rd] = regs[r1] * regs[r2]; break; case DIF_OP_SDIV: if (regs[r2] == 0) { regs[rd] = 0; *flags |= CPU_DTRACE_DIVZERO; } else { regs[rd] = (int64_t)regs[r1] / (int64_t)regs[r2]; } break; case DIF_OP_UDIV: if (regs[r2] == 0) { regs[rd] = 0; *flags |= CPU_DTRACE_DIVZERO; } else { regs[rd] = regs[r1] / regs[r2]; } break; case DIF_OP_SREM: if (regs[r2] == 0) { regs[rd] = 0; *flags |= CPU_DTRACE_DIVZERO; } else { regs[rd] = (int64_t)regs[r1] % (int64_t)regs[r2]; } break; case DIF_OP_UREM: if (regs[r2] == 0) { regs[rd] = 0; *flags |= CPU_DTRACE_DIVZERO; } else { regs[rd] = regs[r1] % regs[r2]; } break; case DIF_OP_NOT: regs[rd] = ~regs[r1]; break; case DIF_OP_MOV: regs[rd] = regs[r1]; break; case DIF_OP_CMP: cc_r = regs[r1] - regs[r2]; cc_n = cc_r < 0; cc_z = cc_r == 0; cc_v = 0; cc_c = regs[r1] < regs[r2]; break; case DIF_OP_TST: cc_n = cc_v = cc_c = 0; cc_z = regs[r1] == 0; break; case DIF_OP_BA: pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BE: if (cc_z) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BNE: if (cc_z == 0) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BG: if ((cc_z | (cc_n ^ cc_v)) == 0) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BGU: if ((cc_c | cc_z) == 0) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BGE: if ((cc_n ^ cc_v) == 0) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BGEU: if (cc_c == 0) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BL: if (cc_n ^ cc_v) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BLU: if (cc_c) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BLE: if (cc_z | (cc_n ^ cc_v)) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_BLEU: if (cc_c | cc_z) pc = DIF_INSTR_LABEL(instr); break; case DIF_OP_RLDSB: if (!dtrace_canload(regs[r1], 1, mstate, vstate)) break; /*FALLTHROUGH*/ case DIF_OP_LDSB: regs[rd] = (int8_t)dtrace_load8(regs[r1]); break; case DIF_OP_RLDSH: if (!dtrace_canload(regs[r1], 2, mstate, vstate)) break; /*FALLTHROUGH*/ case DIF_OP_LDSH: regs[rd] = 
(int16_t)dtrace_load16(regs[r1]); break; case DIF_OP_RLDSW: if (!dtrace_canload(regs[r1], 4, mstate, vstate)) break; /*FALLTHROUGH*/ case DIF_OP_LDSW: regs[rd] = (int32_t)dtrace_load32(regs[r1]); break; case DIF_OP_RLDUB: if (!dtrace_canload(regs[r1], 1, mstate, vstate)) break; /*FALLTHROUGH*/ case DIF_OP_LDUB: regs[rd] = dtrace_load8(regs[r1]); break; case DIF_OP_RLDUH: if (!dtrace_canload(regs[r1], 2, mstate, vstate)) break; /*FALLTHROUGH*/ case DIF_OP_LDUH: regs[rd] = dtrace_load16(regs[r1]); break; case DIF_OP_RLDUW: if (!dtrace_canload(regs[r1], 4, mstate, vstate)) break; /*FALLTHROUGH*/ case DIF_OP_LDUW: regs[rd] = dtrace_load32(regs[r1]); break; case DIF_OP_RLDX: if (!dtrace_canload(regs[r1], 8, mstate, vstate)) break; /*FALLTHROUGH*/ case DIF_OP_LDX: regs[rd] = dtrace_load64(regs[r1]); break; case DIF_OP_ULDSB: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = (int8_t) dtrace_fuword8((void *)(uintptr_t)regs[r1]); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_OP_ULDSH: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = (int16_t) dtrace_fuword16((void *)(uintptr_t)regs[r1]); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_OP_ULDSW: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = (int32_t) dtrace_fuword32((void *)(uintptr_t)regs[r1]); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_OP_ULDUB: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = dtrace_fuword8((void *)(uintptr_t)regs[r1]); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_OP_ULDUH: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = dtrace_fuword16((void *)(uintptr_t)regs[r1]); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_OP_ULDUW: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = dtrace_fuword32((void *)(uintptr_t)regs[r1]); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_OP_ULDX: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); regs[rd] = dtrace_fuword64((void *)(uintptr_t)regs[r1]); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); break; case DIF_OP_RET: rval = regs[rd]; pc = textlen; break; case DIF_OP_NOP: break; case DIF_OP_SETX: regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; break; case DIF_OP_SETS: regs[rd] = (uint64_t)(uintptr_t) (strtab + DIF_INSTR_STRING(instr)); break; case DIF_OP_SCMP: { size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; uintptr_t s1 = regs[r1]; uintptr_t s2 = regs[r2]; size_t lim1, lim2; if (s1 != 0 && !dtrace_strcanload(s1, sz, &lim1, mstate, vstate)) break; if (s2 != 0 && !dtrace_strcanload(s2, sz, &lim2, mstate, vstate)) break; cc_r = dtrace_strncmp((char *)s1, (char *)s2, MIN(lim1, lim2)); cc_n = cc_r < 0; cc_z = cc_r == 0; cc_v = cc_c = 0; break; } case DIF_OP_LDGA: regs[rd] = dtrace_dif_variable(mstate, state, r1, regs[r2]); break; case DIF_OP_LDGS: id = DIF_INSTR_VAR(instr); if (id >= DIF_VAR_OTHER_UBASE) { uintptr_t a; id -= DIF_VAR_OTHER_UBASE; svar = vstate->dtvs_globals[id]; ASSERT(svar != NULL); v = &svar->dtsv_var; if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { regs[rd] = svar->dtsv_data; break; } a = (uintptr_t)svar->dtsv_data; if (*(uint8_t *)a == UINT8_MAX) { /* * If the 0th byte is set to UINT8_MAX * then this is to be treated as a * reference to a NULL variable. 
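* (By-ref statics are laid out as an eight-byte header followed by the * data itself -- hence the "a + sizeof (uint64_t)" below -- so storing a * NULL costs only a single byte write into the header.)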
*/ regs[rd] = 0; } else { regs[rd] = a + sizeof (uint64_t); } break; } regs[rd] = dtrace_dif_variable(mstate, state, id, 0); break; case DIF_OP_STGS: id = DIF_INSTR_VAR(instr); ASSERT(id >= DIF_VAR_OTHER_UBASE); id -= DIF_VAR_OTHER_UBASE; VERIFY(id < vstate->dtvs_nglobals); svar = vstate->dtvs_globals[id]; ASSERT(svar != NULL); v = &svar->dtsv_var; if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { uintptr_t a = (uintptr_t)svar->dtsv_data; size_t lim; ASSERT(a != 0); ASSERT(svar->dtsv_size != 0); if (regs[rd] == 0) { *(uint8_t *)a = UINT8_MAX; break; } else { *(uint8_t *)a = 0; a += sizeof (uint64_t); } if (!dtrace_vcanload( (void *)(uintptr_t)regs[rd], &v->dtdv_type, &lim, mstate, vstate)) break; dtrace_vcopy((void *)(uintptr_t)regs[rd], (void *)a, &v->dtdv_type, lim); break; } svar->dtsv_data = regs[rd]; break; case DIF_OP_LDTA: /* * There are no DTrace built-in thread-local arrays at * present. This opcode is saved for future work. */ *flags |= CPU_DTRACE_ILLOP; regs[rd] = 0; break; case DIF_OP_LDLS: id = DIF_INSTR_VAR(instr); if (id < DIF_VAR_OTHER_UBASE) { /* * For now, this has no meaning. */ regs[rd] = 0; break; } id -= DIF_VAR_OTHER_UBASE; ASSERT(id < vstate->dtvs_nlocals); ASSERT(vstate->dtvs_locals != NULL); svar = vstate->dtvs_locals[id]; ASSERT(svar != NULL); v = &svar->dtsv_var; if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { uintptr_t a = (uintptr_t)svar->dtsv_data; size_t sz = v->dtdv_type.dtdt_size; size_t lim; sz += sizeof (uint64_t); ASSERT(svar->dtsv_size == NCPU * sz); a += curcpu * sz; if (*(uint8_t *)a == UINT8_MAX) { /* * If the 0th byte is set to UINT8_MAX * then this is to be treated as a * reference to a NULL variable. */ regs[rd] = 0; } else { regs[rd] = a + sizeof (uint64_t); } break; } ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; regs[rd] = tmp[curcpu]; break; case DIF_OP_STLS: id = DIF_INSTR_VAR(instr); ASSERT(id >= DIF_VAR_OTHER_UBASE); id -= DIF_VAR_OTHER_UBASE; VERIFY(id < vstate->dtvs_nlocals); ASSERT(vstate->dtvs_locals != NULL); svar = vstate->dtvs_locals[id]; ASSERT(svar != NULL); v = &svar->dtsv_var; if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { uintptr_t a = (uintptr_t)svar->dtsv_data; size_t sz = v->dtdv_type.dtdt_size; size_t lim; sz += sizeof (uint64_t); ASSERT(svar->dtsv_size == NCPU * sz); a += curcpu * sz; if (regs[rd] == 0) { *(uint8_t *)a = UINT8_MAX; break; } else { *(uint8_t *)a = 0; a += sizeof (uint64_t); } if (!dtrace_vcanload( (void *)(uintptr_t)regs[rd], &v->dtdv_type, &lim, mstate, vstate)) break; dtrace_vcopy((void *)(uintptr_t)regs[rd], (void *)a, &v->dtdv_type, lim); break; } ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; tmp[curcpu] = regs[rd]; break; case DIF_OP_LDTS: { dtrace_dynvar_t *dvar; dtrace_key_t *key; id = DIF_INSTR_VAR(instr); ASSERT(id >= DIF_VAR_OTHER_UBASE); id -= DIF_VAR_OTHER_UBASE; v = &vstate->dtvs_tlocals[id]; key = &tupregs[DIF_DTR_NREGS]; key[0].dttk_value = (uint64_t)id; key[0].dttk_size = 0; DTRACE_TLS_THRKEY(key[1].dttk_value); key[1].dttk_size = 0; dvar = dtrace_dynvar(dstate, 2, key, sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, mstate, vstate); if (dvar == NULL) { regs[rd] = 0; break; } if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; } else { regs[rd] = *((uint64_t *)dvar->dtdv_data); } break; } case DIF_OP_STTS: { dtrace_dynvar_t *dvar; dtrace_key_t *key; id = DIF_INSTR_VAR(instr); ASSERT(id >= DIF_VAR_OTHER_UBASE); id -= DIF_VAR_OTHER_UBASE; VERIFY(id < 
vstate->dtvs_ntlocals); key = &tupregs[DIF_DTR_NREGS]; key[0].dttk_value = (uint64_t)id; key[0].dttk_size = 0; DTRACE_TLS_THRKEY(key[1].dttk_value); key[1].dttk_size = 0; v = &vstate->dtvs_tlocals[id]; dvar = dtrace_dynvar(dstate, 2, key, v->dtdv_type.dtdt_size > sizeof (uint64_t) ? v->dtdv_type.dtdt_size : sizeof (uint64_t), regs[rd] ? DTRACE_DYNVAR_ALLOC : DTRACE_DYNVAR_DEALLOC, mstate, vstate); /* * Given that we're storing to thread-local data, * we need to flush our predicate cache. */ curthread->t_predcache = 0; if (dvar == NULL) break; if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { size_t lim; if (!dtrace_vcanload( (void *)(uintptr_t)regs[rd], &v->dtdv_type, &lim, mstate, vstate)) break; dtrace_vcopy((void *)(uintptr_t)regs[rd], dvar->dtdv_data, &v->dtdv_type, lim); } else { *((uint64_t *)dvar->dtdv_data) = regs[rd]; } break; } case DIF_OP_SRA: regs[rd] = (int64_t)regs[r1] >> regs[r2]; break; case DIF_OP_CALL: dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, regs, tupregs, ttop, mstate, state); break; case DIF_OP_PUSHTR: if (ttop == DIF_DTR_NREGS) { *flags |= CPU_DTRACE_TUPOFLOW; break; } if (r1 == DIF_TYPE_STRING) { /* * If this is a string type and the size is 0, * we'll use the system-wide default string * size. Note that we are _not_ looking at * the value of the DTRACEOPT_STRSIZE option; * had this been set, we would expect to have * a non-zero size value in the "pushtr". */ tupregs[ttop].dttk_size = dtrace_strlen((char *)(uintptr_t)regs[rd], regs[r2] ? regs[r2] : dtrace_strsize_default) + 1; } else { if (regs[r2] > LONG_MAX) { *flags |= CPU_DTRACE_ILLOP; break; } tupregs[ttop].dttk_size = regs[r2]; } tupregs[ttop++].dttk_value = regs[rd]; break; case DIF_OP_PUSHTV: if (ttop == DIF_DTR_NREGS) { *flags |= CPU_DTRACE_TUPOFLOW; break; } tupregs[ttop].dttk_value = regs[rd]; tupregs[ttop++].dttk_size = 0; break; case DIF_OP_POPTS: if (ttop != 0) ttop--; break; case DIF_OP_FLUSHTS: ttop = 0; break; case DIF_OP_LDGAA: case DIF_OP_LDTAA: { dtrace_dynvar_t *dvar; dtrace_key_t *key = tupregs; uint_t nkeys = ttop; id = DIF_INSTR_VAR(instr); ASSERT(id >= DIF_VAR_OTHER_UBASE); id -= DIF_VAR_OTHER_UBASE; key[nkeys].dttk_value = (uint64_t)id; key[nkeys++].dttk_size = 0; if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { DTRACE_TLS_THRKEY(key[nkeys].dttk_value); key[nkeys++].dttk_size = 0; VERIFY(id < vstate->dtvs_ntlocals); v = &vstate->dtvs_tlocals[id]; } else { VERIFY(id < vstate->dtvs_nglobals); v = &vstate->dtvs_globals[id]->dtsv_var; } dvar = dtrace_dynvar(dstate, nkeys, key, v->dtdv_type.dtdt_size > sizeof (uint64_t) ? v->dtdv_type.dtdt_size : sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, mstate, vstate); if (dvar == NULL) { regs[rd] = 0; break; } if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; } else { regs[rd] = *((uint64_t *)dvar->dtdv_data); } break; } case DIF_OP_STGAA: case DIF_OP_STTAA: { dtrace_dynvar_t *dvar; dtrace_key_t *key = tupregs; uint_t nkeys = ttop; id = DIF_INSTR_VAR(instr); ASSERT(id >= DIF_VAR_OTHER_UBASE); id -= DIF_VAR_OTHER_UBASE; key[nkeys].dttk_value = (uint64_t)id; key[nkeys++].dttk_size = 0; if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { DTRACE_TLS_THRKEY(key[nkeys].dttk_value); key[nkeys++].dttk_size = 0; VERIFY(id < vstate->dtvs_ntlocals); v = &vstate->dtvs_tlocals[id]; } else { VERIFY(id < vstate->dtvs_nglobals); v = &vstate->dtvs_globals[id]->dtsv_var; } dvar = dtrace_dynvar(dstate, nkeys, key, v->dtdv_type.dtdt_size > sizeof (uint64_t) ? v->dtdv_type.dtdt_size : sizeof (uint64_t), regs[rd] ? 
DTRACE_DYNVAR_ALLOC : DTRACE_DYNVAR_DEALLOC, mstate, vstate); if (dvar == NULL) break; if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { size_t lim; if (!dtrace_vcanload( (void *)(uintptr_t)regs[rd], &v->dtdv_type, &lim, mstate, vstate)) break; dtrace_vcopy((void *)(uintptr_t)regs[rd], dvar->dtdv_data, &v->dtdv_type, lim); } else { *((uint64_t *)dvar->dtdv_data) = regs[rd]; } break; } case DIF_OP_ALLOCS: { uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; /* * Rounding up the user allocation size could have * overflowed large, bogus allocations (like -1ULL) to * 0. */ if (size < regs[r1] || !DTRACE_INSCRATCH(mstate, size)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); regs[rd] = 0; break; } dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); mstate->dtms_scratch_ptr += size; regs[rd] = ptr; break; } case DIF_OP_COPYS: if (!dtrace_canstore(regs[rd], regs[r2], mstate, vstate)) { *flags |= CPU_DTRACE_BADADDR; *illval = regs[rd]; break; } if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) break; dtrace_bcopy((void *)(uintptr_t)regs[r1], (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); break; case DIF_OP_STB: if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { *flags |= CPU_DTRACE_BADADDR; *illval = regs[rd]; break; } *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; break; case DIF_OP_STH: if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { *flags |= CPU_DTRACE_BADADDR; *illval = regs[rd]; break; } if (regs[rd] & 1) { *flags |= CPU_DTRACE_BADALIGN; *illval = regs[rd]; break; } *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; break; case DIF_OP_STW: if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { *flags |= CPU_DTRACE_BADADDR; *illval = regs[rd]; break; } if (regs[rd] & 3) { *flags |= CPU_DTRACE_BADALIGN; *illval = regs[rd]; break; } *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; break; case DIF_OP_STX: if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { *flags |= CPU_DTRACE_BADADDR; *illval = regs[rd]; break; } if (regs[rd] & 7) { *flags |= CPU_DTRACE_BADALIGN; *illval = regs[rd]; break; } *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; break; } } if (!(*flags & CPU_DTRACE_FAULT)) return (rval); mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; return (0); } static void dtrace_action_breakpoint(dtrace_ecb_t *ecb) { dtrace_probe_t *probe = ecb->dte_probe; dtrace_provider_t *prov = probe->dtpr_provider; char c[DTRACE_FULLNAMELEN + 80], *str; char *msg = "dtrace: breakpoint action at probe "; char *ecbmsg = " (ecb "; uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); uintptr_t val = (uintptr_t)ecb; int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; if (dtrace_destructive_disallow) return; /* * It's impossible to be taking action on the NULL probe. */ ASSERT(probe != NULL); /* * This is a poor man's (destitute man's?) sprintf(): we want to * print the provider name, module name, function name and name of * the probe, along with the hex address of the ECB with the breakpoint * action -- all of which we must place in the character buffer by * hand. 
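* (The loop below renders the ECB address in hexadecimal by hand: four * bits at a time, most significant nibble first, with leading zeroes * suppressed.) */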
*/ while (*msg != '\0') c[i++] = *msg++; for (str = prov->dtpv_name; *str != '\0'; str++) c[i++] = *str; c[i++] = ':'; for (str = probe->dtpr_mod; *str != '\0'; str++) c[i++] = *str; c[i++] = ':'; for (str = probe->dtpr_func; *str != '\0'; str++) c[i++] = *str; c[i++] = ':'; for (str = probe->dtpr_name; *str != '\0'; str++) c[i++] = *str; while (*ecbmsg != '\0') c[i++] = *ecbmsg++; while (shift >= 0) { mask = (uintptr_t)0xf << shift; if (val >= ((uintptr_t)1 << shift)) c[i++] = "0123456789abcdef"[(val & mask) >> shift]; shift -= 4; } c[i++] = ')'; c[i] = '\0'; #ifdef illumos debug_enter(c); #else kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); #endif } static void dtrace_action_panic(dtrace_ecb_t *ecb) { dtrace_probe_t *probe = ecb->dte_probe; /* * It's impossible to be taking action on the NULL probe. */ ASSERT(probe != NULL); if (dtrace_destructive_disallow) return; if (dtrace_panicked != NULL) return; if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) return; /* * We won the right to panic. (We want to be sure that only one * thread calls panic() from dtrace_probe(), and that panic() is * called exactly once.) */ dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", probe->dtpr_provider->dtpv_name, probe->dtpr_mod, probe->dtpr_func, probe->dtpr_name, (void *)ecb); } static void dtrace_action_raise(uint64_t sig) { if (dtrace_destructive_disallow) return; if (sig >= NSIG) { DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); return; } #ifdef illumos /* * raise() has a queue depth of 1 -- we ignore all subsequent * invocations of the raise() action. */ if (curthread->t_dtrace_sig == 0) curthread->t_dtrace_sig = (uint8_t)sig; curthread->t_sig_check = 1; aston(curthread); #else struct proc *p = curproc; PROC_LOCK(p); kern_psignal(p, sig); PROC_UNLOCK(p); #endif } static void dtrace_action_stop(void) { if (dtrace_destructive_disallow) return; #ifdef illumos if (!curthread->t_dtrace_stop) { curthread->t_dtrace_stop = 1; curthread->t_sig_check = 1; aston(curthread); } #else struct proc *p = curproc; PROC_LOCK(p); kern_psignal(p, SIGSTOP); PROC_UNLOCK(p); #endif } static void dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) { hrtime_t now; volatile uint16_t *flags; #ifdef illumos cpu_t *cpu = CPU; #else cpu_t *cpu = &solaris_cpu[curcpu]; #endif if (dtrace_destructive_disallow) return; flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; now = dtrace_gethrtime(); if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { /* * We need to advance the mark to the current time. */ cpu->cpu_dtrace_chillmark = now; cpu->cpu_dtrace_chilled = 0; } /* * Now check to see if the requested chill time would take us over * the maximum amount of time allowed in the chill interval. (Or * worse, if the calculation itself induces overflow.) */ if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { *flags |= CPU_DTRACE_ILLOP; return; } while (dtrace_gethrtime() - now < val) continue; /* * Normally, we assure that the value of the variable "timestamp" does * not change within an ECB. The presence of chill() represents an * exception to this rule, however. 
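* A chill(500000000) action, for example, spins this CPU for half a * second, so any cached notion of "now" is stale by the time we return. */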
*/ mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; cpu->cpu_dtrace_chilled += val; } static void dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t *buf, uint64_t arg) { int nframes = DTRACE_USTACK_NFRAMES(arg); int strsize = DTRACE_USTACK_STRSIZE(arg); uint64_t *pcs = &buf[1], *fps; char *str = (char *)&pcs[nframes]; int size, offs = 0, i, j; size_t rem; uintptr_t old = mstate->dtms_scratch_ptr, saved; uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; char *sym; /* * Should be taking a faster path if string space has not been * allocated. */ ASSERT(strsize != 0); /* * We will first allocate some temporary space for the frame pointers. */ fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); size = (uintptr_t)fps - mstate->dtms_scratch_ptr + (nframes * sizeof (uint64_t)); if (!DTRACE_INSCRATCH(mstate, size)) { /* * Not enough room for our frame pointers -- need to indicate * that we ran out of scratch space. */ DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); return; } mstate->dtms_scratch_ptr += size; saved = mstate->dtms_scratch_ptr; /* * Now get a stack with both program counters and frame pointers. */ DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_getufpstack(buf, fps, nframes + 1); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); /* * If that faulted, we're cooked. */ if (*flags & CPU_DTRACE_FAULT) goto out; /* * Now we want to walk up the stack, calling the USTACK helper. For * each iteration, we restore the scratch pointer. */ for (i = 0; i < nframes; i++) { mstate->dtms_scratch_ptr = saved; if (offs >= strsize) break; sym = (char *)(uintptr_t)dtrace_helper( DTRACE_HELPER_ACTION_USTACK, mstate, state, pcs[i], fps[i]); /* * If we faulted while running the helper, we're going to * clear the fault and null out the corresponding string. */ if (*flags & CPU_DTRACE_FAULT) { *flags &= ~CPU_DTRACE_FAULT; str[offs++] = '\0'; continue; } if (sym == NULL) { str[offs++] = '\0'; continue; } if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate, &(state->dts_vstate))) { str[offs++] = '\0'; continue; } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); /* * Now copy in the string that the helper returned to us. */ for (j = 0; offs + j < strsize && j < rem; j++) { if ((str[offs + j] = sym[j]) == '\0') break; } DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); offs += j + 1; } if (offs >= strsize) { /* * If we didn't have room for all of the strings, we don't * abort processing -- this needn't be a fatal error -- but we * still want to increment a counter (dts_stkstroverflows) to * allow this condition to be warned about. (If this is from * a jstack() action, it is easily tuned via jstackstrsize.) */ dtrace_error(&state->dts_stkstroverflows); } while (offs < strsize) str[offs++] = '\0'; out: mstate->dtms_scratch_ptr = old; } static void dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) { volatile uint16_t *flags; uint64_t val = *valp; size_t valoffs = *valoffsp; flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags; ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF); /* * If this is a string, we're going to only load until we find the zero * byte -- after which we'll store zero bytes. 
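* Copying "ab" into a four-byte record, for example, stores 'a', 'b', * '\0', '\0' -- the zero fill keeps records fixed-size -- while a string * stored mid-tuple may stop at its terminating '\0'.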
*/ if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { char c = '\0' + 1; size_t s; for (s = 0; s < size; s++) { if (c != '\0' && dtkind == DIF_TF_BYREF) { c = dtrace_load8(val++); } else if (c != '\0' && dtkind == DIF_TF_BYUREF) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); c = dtrace_fuword8((void *)(uintptr_t)val++); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); if (*flags & CPU_DTRACE_FAULT) break; } DTRACE_STORE(uint8_t, tomax, valoffs++, c); if (c == '\0' && intuple) break; } } else { uint8_t c; while (valoffs < end) { if (dtkind == DIF_TF_BYREF) { c = dtrace_load8(val++); } else if (dtkind == DIF_TF_BYUREF) { DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); c = dtrace_fuword8((void *)(uintptr_t)val++); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); if (*flags & CPU_DTRACE_FAULT) break; } DTRACE_STORE(uint8_t, tomax, valoffs++, c); } } *valp = val; *valoffsp = valoffs; } /* * If you're looking for the epicenter of DTrace, you just found it. This * is the function called by the provider to fire a probe -- from which all * subsequent probe-context DTrace activity emanates. */ void dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) { processorid_t cpuid; dtrace_icookie_t cookie; dtrace_probe_t *probe; dtrace_mstate_t mstate; dtrace_ecb_t *ecb; dtrace_action_t *act; intptr_t offs; size_t size; int vtime, onintr; volatile uint16_t *flags; hrtime_t now; if (panicstr != NULL) return; #ifdef illumos /* * Kick out immediately if this CPU is still being born (in which case * curthread will be set to -1) or the current thread can't allow * probes in its current context. */ if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) return; #endif cookie = dtrace_interrupt_disable(); probe = dtrace_probes[id - 1]; cpuid = curcpu; onintr = CPU_ON_INTR(CPU); if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && probe->dtpr_predcache == curthread->t_predcache) { /* * We have hit in the predicate cache; we know that * this predicate would evaluate to be false. */ dtrace_interrupt_enable(cookie); return; } #ifdef illumos if (panic_quiesce) { #else if (panicstr != NULL) { #endif /* * We don't trace anything if we're panicking. */ dtrace_interrupt_enable(cookie); return; } now = mstate.dtms_timestamp = dtrace_gethrtime(); mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; vtime = dtrace_vtime_references != 0; if (vtime && curthread->t_dtrace_start) curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; mstate.dtms_difo = NULL; mstate.dtms_probe = probe; mstate.dtms_strtok = 0; mstate.dtms_arg[0] = arg0; mstate.dtms_arg[1] = arg1; mstate.dtms_arg[2] = arg2; mstate.dtms_arg[3] = arg3; mstate.dtms_arg[4] = arg4; flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { dtrace_predicate_t *pred = ecb->dte_predicate; dtrace_state_t *state = ecb->dte_state; dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; dtrace_vstate_t *vstate = &state->dts_vstate; dtrace_provider_t *prov = probe->dtpr_provider; uint64_t tracememsize = 0; int committed = 0; caddr_t tomax; /* * A little subtlety with the following (seemingly innocuous) * declaration of the automatic 'val': by looking at the * code, you might think that it could be declared in the * action processing loop, below. (That is, it's only used in * the action processing loop.) 
However, it must be declared * out of that scope because in the case of DIF expression * arguments to aggregating actions, one iteration of the * action loop will use the last iteration's value. */ uint64_t val = 0; mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; mstate.dtms_getf = NULL; *flags &= ~CPU_DTRACE_ERROR; if (prov == dtrace_provider) { /* * If dtrace itself is the provider of this probe, * we're only going to continue processing the ECB if * arg0 (the dtrace_state_t) is equal to the ECB's * creating state. (This prevents disjoint consumers * from seeing one another's metaprobes.) */ if (arg0 != (uint64_t)(uintptr_t)state) continue; } if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { /* * We're not currently active. If our provider isn't * the dtrace pseudo provider, we're not interested. */ if (prov != dtrace_provider) continue; /* * Now we must further check if we are in the BEGIN * probe. If we are, we will only continue processing * if we're still in WARMUP -- if one BEGIN enabling * has invoked the exit() action, we don't want to * evaluate subsequent BEGIN enablings. */ if (probe->dtpr_id == dtrace_probeid_begin && state->dts_activity != DTRACE_ACTIVITY_WARMUP) { ASSERT(state->dts_activity == DTRACE_ACTIVITY_DRAINING); continue; } } if (ecb->dte_cond) { /* * If the dte_cond bits indicate that this * consumer is only allowed to see user-mode firings * of this probe, call the provider's dtps_usermode() * entry point to check that the probe was fired * while in a user context. Skip this ECB if that's * not the case. */ if ((ecb->dte_cond & DTRACE_COND_USERMODE) && prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, probe->dtpr_id, probe->dtpr_arg) == 0) continue; #ifdef illumos /* * This is more subtle than it looks. We have to be * absolutely certain that CRED() isn't going to * change out from under us so it's only legit to * examine that structure if we're in constrained * situations. Currently, the only times we'll perform this * check is if a non-super-user has enabled the * profile or syscall providers -- providers that * allow visibility of all processes. For the * profile case, the check above will ensure that * we're examining a user context. */ if (ecb->dte_cond & DTRACE_COND_OWNER) { cred_t *cr; cred_t *s_cr = ecb->dte_state->dts_cred.dcr_cred; proc_t *proc; ASSERT(s_cr != NULL); if ((cr = CRED()) == NULL || s_cr->cr_uid != cr->cr_uid || s_cr->cr_uid != cr->cr_ruid || s_cr->cr_uid != cr->cr_suid || s_cr->cr_gid != cr->cr_gid || s_cr->cr_gid != cr->cr_rgid || s_cr->cr_gid != cr->cr_sgid || (proc = ttoproc(curthread)) == NULL || (proc->p_flag & SNOCD)) continue; } if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { cred_t *cr; cred_t *s_cr = ecb->dte_state->dts_cred.dcr_cred; ASSERT(s_cr != NULL); if ((cr = CRED()) == NULL || s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) continue; } #endif } if (now - state->dts_alive > dtrace_deadman_timeout) { /* * We seem to be dead. Unless we (a) have kernel * destructive permissions, (b) have explicitly enabled * destructive actions and (c) destructive actions have * not been disabled, we're going to transition into * the KILLED state, from which no further processing * on this state will be performed. 
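* (dts_alive is refreshed periodically by the deadman timer; if it has * gone stale by more than dtrace_deadman_timeout, we assume that either * the consumer or the machine is wedged and stop tracing on this state.)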
*/ if (!dtrace_priv_kernel_destructive(state) || !state->dts_cred.dcr_destructive || dtrace_destructive_disallow) { void *activity = &state->dts_activity; dtrace_activity_t current; do { current = state->dts_activity; } while (dtrace_cas32(activity, current, DTRACE_ACTIVITY_KILLED) != current); continue; } } if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, ecb->dte_alignment, state, &mstate)) < 0) continue; tomax = buf->dtb_tomax; ASSERT(tomax != NULL); if (ecb->dte_size != 0) { dtrace_rechdr_t dtrh; if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { mstate.dtms_timestamp = dtrace_gethrtime(); mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; } ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); dtrh.dtrh_epid = ecb->dte_epid; DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, mstate.dtms_timestamp); *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; } mstate.dtms_epid = ecb->dte_epid; mstate.dtms_present |= DTRACE_MSTATE_EPID; if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) mstate.dtms_access = DTRACE_ACCESS_KERNEL; else mstate.dtms_access = 0; if (pred != NULL) { dtrace_difo_t *dp = pred->dtp_difo; uint64_t rval; rval = dtrace_dif_emulate(dp, &mstate, vstate, state); if (!(*flags & CPU_DTRACE_ERROR) && !rval) { dtrace_cacheid_t cid = probe->dtpr_predcache; if (cid != DTRACE_CACHEIDNONE && !onintr) { /* * Update the predicate cache... */ ASSERT(cid == pred->dtp_cacheid); curthread->t_predcache = cid; } continue; } } for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && act != NULL; act = act->dta_next) { size_t valoffs; dtrace_difo_t *dp; dtrace_recdesc_t *rec = &act->dta_rec; size = rec->dtrd_size; valoffs = offs + rec->dtrd_offset; if (DTRACEACT_ISAGG(act->dta_kind)) { uint64_t v = 0xbad; dtrace_aggregation_t *agg; agg = (dtrace_aggregation_t *)act; if ((dp = act->dta_difo) != NULL) v = dtrace_dif_emulate(dp, &mstate, vstate, state); if (*flags & CPU_DTRACE_ERROR) continue; /* * Note that we always pass the expression * value from the previous iteration of the * action loop. This value will only be used * if there is an expression argument to the * aggregating action, denoted by the * dtag_hasarg field. */ dtrace_aggregate(agg, buf, offs, aggbuf, v, val); continue; } switch (act->dta_kind) { case DTRACEACT_STOP: if (dtrace_priv_proc_destructive(state)) dtrace_action_stop(); continue; case DTRACEACT_BREAKPOINT: if (dtrace_priv_kernel_destructive(state)) dtrace_action_breakpoint(ecb); continue; case DTRACEACT_PANIC: if (dtrace_priv_kernel_destructive(state)) dtrace_action_panic(ecb); continue; case DTRACEACT_STACK: if (!dtrace_priv_kernel(state)) continue; dtrace_getpcstack((pc_t *)(tomax + valoffs), size / sizeof (pc_t), probe->dtpr_aframes, DTRACE_ANCHORED(probe) ? NULL : (uint32_t *)arg0); continue; case DTRACEACT_JSTACK: case DTRACEACT_USTACK: if (!dtrace_priv_proc(state)) continue; /* * See comment in DIF_VAR_PID. */ if (DTRACE_ANCHORED(mstate.dtms_probe) && CPU_ON_INTR(CPU)) { int depth = DTRACE_USTACK_NFRAMES( rec->dtrd_arg) + 1; dtrace_bzero((void *)(tomax + valoffs), DTRACE_USTACK_STRSIZE(rec->dtrd_arg) + depth * sizeof (uint64_t)); continue; } if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && curproc->p_dtrace_helpers != NULL) { /* * This is the slow path -- we have * allocated string space, and we're * getting the stack of a process that * has helpers. Call into a separate * routine to perform this processing. 
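* (This is the jstack() case: string space has been allocated, so each * user frame can be handed to the process's helper, which may translate * a raw PC into, e.g., an interpreted-language frame name.)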
*/ dtrace_action_ustack(&mstate, state, (uint64_t *)(tomax + valoffs), rec->dtrd_arg); continue; } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); dtrace_getupcstack((uint64_t *) (tomax + valoffs), DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); continue; default: break; } dp = act->dta_difo; ASSERT(dp != NULL); val = dtrace_dif_emulate(dp, &mstate, vstate, state); if (*flags & CPU_DTRACE_ERROR) continue; switch (act->dta_kind) { case DTRACEACT_SPECULATE: { dtrace_rechdr_t *dtrh; ASSERT(buf == &state->dts_buffer[cpuid]); buf = dtrace_speculation_buffer(state, cpuid, val); if (buf == NULL) { *flags |= CPU_DTRACE_DROP; continue; } offs = dtrace_buffer_reserve(buf, ecb->dte_needed, ecb->dte_alignment, state, NULL); if (offs < 0) { *flags |= CPU_DTRACE_DROP; continue; } tomax = buf->dtb_tomax; ASSERT(tomax != NULL); if (ecb->dte_size == 0) continue; ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); dtrh = ((void *)(tomax + offs)); dtrh->dtrh_epid = ecb->dte_epid; /* * When the speculation is committed, all of * the records in the speculative buffer will * have their timestamps set to the commit * time. Until then, it is set to a sentinel * value, for debugability. */ DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); continue; } case DTRACEACT_PRINTM: { /* The DIF returns a 'memref'. */ uintptr_t *memref = (uintptr_t *)(uintptr_t) val; /* Get the size from the memref. */ size = memref[1]; /* * Check if the size exceeds the allocated * buffer size. */ if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { /* Flag a drop! */ *flags |= CPU_DTRACE_DROP; continue; } /* Store the size in the buffer first. */ DTRACE_STORE(uintptr_t, tomax, valoffs, size); /* * Offset the buffer address to the start * of the data. */ valoffs += sizeof(uintptr_t); /* * Reset to the memory address rather than * the memref array, then let the BYREF * code below do the work to store the * memory data in the buffer. */ val = memref[0]; break; } case DTRACEACT_CHILL: if (dtrace_priv_kernel_destructive(state)) dtrace_action_chill(&mstate, val); continue; case DTRACEACT_RAISE: if (dtrace_priv_proc_destructive(state)) dtrace_action_raise(val); continue; case DTRACEACT_COMMIT: ASSERT(!committed); /* * We need to commit our buffer state. */ if (ecb->dte_size) buf->dtb_offset = offs + ecb->dte_size; buf = &state->dts_buffer[cpuid]; dtrace_speculation_commit(state, cpuid, val); committed = 1; continue; case DTRACEACT_DISCARD: dtrace_speculation_discard(state, cpuid, val); continue; case DTRACEACT_DIFEXPR: case DTRACEACT_LIBACT: case DTRACEACT_PRINTF: case DTRACEACT_PRINTA: case DTRACEACT_SYSTEM: case DTRACEACT_FREOPEN: case DTRACEACT_TRACEMEM: break; case DTRACEACT_TRACEMEM_DYNSIZE: tracememsize = val; break; case DTRACEACT_SYM: case DTRACEACT_MOD: if (!dtrace_priv_kernel(state)) continue; break; case DTRACEACT_USYM: case DTRACEACT_UMOD: case DTRACEACT_UADDR: { #ifdef illumos struct pid *pid = curthread->t_procp->p_pidp; #endif if (!dtrace_priv_proc(state)) continue; DTRACE_STORE(uint64_t, tomax, #ifdef illumos valoffs, (uint64_t)pid->pid_id); #else valoffs, (uint64_t) curproc->p_pid); #endif DTRACE_STORE(uint64_t, tomax, valoffs + sizeof (uint64_t), val); continue; } case DTRACEACT_EXIT: { /* * For the exit action, we are going to attempt * to atomically set our activity to be * draining. If this fails (either because * another CPU has beat us to the exit action, * or because our current activity is something * other than ACTIVE or WARMUP), we will * continue. 
This assures that the exit action * can be successfully recorded at most once * when we're in the ACTIVE state. If we're * encountering the exit() action while in * COOLDOWN, however, we want to honor the new * status code. (We know that we're the only * thread in COOLDOWN, so there is no race.) */ void *activity = &state->dts_activity; dtrace_activity_t current = state->dts_activity; if (current == DTRACE_ACTIVITY_COOLDOWN) break; if (current != DTRACE_ACTIVITY_WARMUP) current = DTRACE_ACTIVITY_ACTIVE; if (dtrace_cas32(activity, current, DTRACE_ACTIVITY_DRAINING) != current) { *flags |= CPU_DTRACE_DROP; continue; } break; } default: ASSERT(0); } if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF || dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) { uintptr_t end = valoffs + size; if (tracememsize != 0 && valoffs + tracememsize < end) { end = valoffs + tracememsize; tracememsize = 0; } if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF && !dtrace_vcanload((void *)(uintptr_t)val, &dp->dtdo_rtype, NULL, &mstate, vstate)) continue; dtrace_store_by_ref(dp, tomax, size, &valoffs, &val, end, act->dta_intuple, dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ? DIF_TF_BYREF: DIF_TF_BYUREF); continue; } switch (size) { case 0: break; case sizeof (uint8_t): DTRACE_STORE(uint8_t, tomax, valoffs, val); break; case sizeof (uint16_t): DTRACE_STORE(uint16_t, tomax, valoffs, val); break; case sizeof (uint32_t): DTRACE_STORE(uint32_t, tomax, valoffs, val); break; case sizeof (uint64_t): DTRACE_STORE(uint64_t, tomax, valoffs, val); break; default: /* * Any other size should have been returned by * reference, not by value. */ ASSERT(0); break; } } if (*flags & CPU_DTRACE_DROP) continue; if (*flags & CPU_DTRACE_FAULT) { int ndx; dtrace_action_t *err; buf->dtb_errors++; if (probe->dtpr_id == dtrace_probeid_error) { /* * There's nothing we can do -- we had an * error on the error probe. We bump an * error counter to at least indicate that * this condition happened. */ dtrace_error(&state->dts_dblerrors); continue; } if (vtime) { /* * Before recursing on dtrace_probe(), we * need to explicitly clear out our start * time to prevent it from being accumulated * into t_dtrace_vtime. */ curthread->t_dtrace_start = 0; } /* * Iterate over the actions to figure out which action * we were processing when we experienced the error. * Note that act points _past_ the faulting action; if * act is ecb->dte_action, the fault was in the * predicate, if it's ecb->dte_action->dta_next it's * in action #1, and so on. */ for (err = ecb->dte_action, ndx = 0; err != act; err = err->dta_next, ndx++) continue; dtrace_probe_error(state, ecb->dte_epid, ndx, (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), cpu_core[cpuid].cpuc_dtrace_illval); continue; } if (!committed) buf->dtb_offset = offs + ecb->dte_size; } if (vtime) curthread->t_dtrace_start = dtrace_gethrtime(); dtrace_interrupt_enable(cookie); } /* * DTrace Probe Hashing Functions * * The functions in this section (and indeed, the functions in remaining * sections) are not _called_ from probe context. (Any exceptions to this are * marked with a "Note:".) Rather, they are called from elsewhere in the * DTrace framework to look-up probes in, add probes to and remove probes from * the DTrace probe hashes. (Each probe is hashed by each element of the * probe tuple -- allowing for fast lookups, regardless of what was * specified.) 
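* For example, a description such as syscall::read:entry can be resolved * through the by-function hash on "read" or the by-name hash on "entry" * instead of a walk over every registered probe.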
*/ static uint_t dtrace_hash_str(const char *p) { unsigned int g; uint_t hval = 0; while (*p) { hval = (hval << 4) + *p++; if ((g = (hval & 0xf0000000)) != 0) hval ^= g >> 24; hval &= ~g; } return (hval); } static dtrace_hash_t * dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) { dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); hash->dth_stroffs = stroffs; hash->dth_nextoffs = nextoffs; hash->dth_prevoffs = prevoffs; hash->dth_size = 1; hash->dth_mask = hash->dth_size - 1; hash->dth_tab = kmem_zalloc(hash->dth_size * sizeof (dtrace_hashbucket_t *), KM_SLEEP); return (hash); } static void dtrace_hash_destroy(dtrace_hash_t *hash) { #ifdef DEBUG int i; for (i = 0; i < hash->dth_size; i++) ASSERT(hash->dth_tab[i] == NULL); #endif kmem_free(hash->dth_tab, hash->dth_size * sizeof (dtrace_hashbucket_t *)); kmem_free(hash, sizeof (dtrace_hash_t)); } static void dtrace_hash_resize(dtrace_hash_t *hash) { int size = hash->dth_size, i, ndx; int new_size = hash->dth_size << 1; int new_mask = new_size - 1; dtrace_hashbucket_t **new_tab, *bucket, *next; ASSERT((new_size & new_mask) == 0); new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); for (i = 0; i < size; i++) { for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { dtrace_probe_t *probe = bucket->dthb_chain; ASSERT(probe != NULL); ndx = DTRACE_HASHSTR(hash, probe) & new_mask; next = bucket->dthb_next; bucket->dthb_next = new_tab[ndx]; new_tab[ndx] = bucket; } } kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); hash->dth_tab = new_tab; hash->dth_size = new_size; hash->dth_mask = new_mask; } static void dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) { int hashval = DTRACE_HASHSTR(hash, new); int ndx = hashval & hash->dth_mask; dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; dtrace_probe_t **nextp, **prevp; for (; bucket != NULL; bucket = bucket->dthb_next) { if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) goto add; } if ((hash->dth_nbuckets >> 1) > hash->dth_size) { dtrace_hash_resize(hash); dtrace_hash_add(hash, new); return; } bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); bucket->dthb_next = hash->dth_tab[ndx]; hash->dth_tab[ndx] = bucket; hash->dth_nbuckets++; add: nextp = DTRACE_HASHNEXT(hash, new); ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); *nextp = bucket->dthb_chain; if (bucket->dthb_chain != NULL) { prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); ASSERT(*prevp == NULL); *prevp = new; } bucket->dthb_chain = new; bucket->dthb_len++; } static dtrace_probe_t * dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) { int hashval = DTRACE_HASHSTR(hash, template); int ndx = hashval & hash->dth_mask; dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; for (; bucket != NULL; bucket = bucket->dthb_next) { if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) return (bucket->dthb_chain); } return (NULL); } static int dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) { int hashval = DTRACE_HASHSTR(hash, template); int ndx = hashval & hash->dth_mask; dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; for (; bucket != NULL; bucket = bucket->dthb_next) { if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) return (bucket->dthb_len); } return (0); } static void dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) { int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 
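	/*
	 * prevp and nextp point at the probe's own prev/next link fields
	 * within this hash's chain; the field offsets were recorded by
	 * dtrace_hash_create().
	 */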
dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); /* * Find the bucket that we're removing this probe from. */ for (; bucket != NULL; bucket = bucket->dthb_next) { if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) break; } ASSERT(bucket != NULL); if (*prevp == NULL) { if (*nextp == NULL) { /* * The removed probe was the only probe on this * bucket; we need to remove the bucket. */ dtrace_hashbucket_t *b = hash->dth_tab[ndx]; ASSERT(bucket->dthb_chain == probe); ASSERT(b != NULL); if (b == bucket) { hash->dth_tab[ndx] = bucket->dthb_next; } else { while (b->dthb_next != bucket) b = b->dthb_next; b->dthb_next = bucket->dthb_next; } ASSERT(hash->dth_nbuckets > 0); hash->dth_nbuckets--; kmem_free(bucket, sizeof (dtrace_hashbucket_t)); return; } bucket->dthb_chain = *nextp; } else { *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; } if (*nextp != NULL) *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; } /* * DTrace Utility Functions * * These are random utility functions that are _not_ called from probe context. */ static int dtrace_badattr(const dtrace_attribute_t *a) { return (a->dtat_name > DTRACE_STABILITY_MAX || a->dtat_data > DTRACE_STABILITY_MAX || a->dtat_class > DTRACE_CLASS_MAX); } /* * Return a duplicate copy of a string. If the specified string is NULL, * this function returns a zero-length string. */ static char * dtrace_strdup(const char *str) { char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); if (str != NULL) (void) strcpy(new, str); return (new); } #define DTRACE_ISALPHA(c) \ (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) static int dtrace_badname(const char *s) { char c; if (s == NULL || (c = *s++) == '\0') return (0); if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') return (1); while ((c = *s++) != '\0') { if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && c != '-' && c != '_' && c != '.' && c != '`') return (1); } return (0); } static void dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) { uint32_t priv; #ifdef illumos if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { /* * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. */ priv = DTRACE_PRIV_ALL; } else { *uidp = crgetuid(cr); *zoneidp = crgetzoneid(cr); priv = 0; if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) priv |= DTRACE_PRIV_USER; if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) priv |= DTRACE_PRIV_PROC; if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) priv |= DTRACE_PRIV_OWNER; if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) priv |= DTRACE_PRIV_ZONEOWNER; } #else priv = DTRACE_PRIV_ALL; #endif *privp = priv; } #ifdef DTRACE_ERRDEBUG static void dtrace_errdebug(const char *str) { int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; int occupied = 0; mutex_enter(&dtrace_errlock); dtrace_errlast = str; dtrace_errthread = curthread; while (occupied++ < DTRACE_ERRHASHSZ) { if (dtrace_errhash[hval].dter_msg == str) { dtrace_errhash[hval].dter_count++; goto out; } if (dtrace_errhash[hval].dter_msg != NULL) { hval = (hval + 1) % DTRACE_ERRHASHSZ; continue; } dtrace_errhash[hval].dter_msg = str; dtrace_errhash[hval].dter_count = 1; goto out; } panic("dtrace: undersized error hash"); out: mutex_exit(&dtrace_errlock); } #endif /* * DTrace Matching Functions * * These functions are used to match groups of probes, given some elements of * a probe tuple, or some globbed expressions for elements of a probe tuple. 
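 *
 * For example, the description "syscall::read*:entry" supplies plain
 * strings for the provider ("syscall") and name ("entry") elements, an
 * empty module element (which matches anything) and a glob ("read*")
 * for the function element; each element is evaluated by the matching
 * function that dtrace_probekey_func(), below, selects for it.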
*/ static int dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, zoneid_t zoneid) { if (priv != DTRACE_PRIV_ALL) { uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; uint32_t match = priv & ppriv; /* * No PRIV_DTRACE_* privileges... */ if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | DTRACE_PRIV_KERNEL)) == 0) return (0); /* * No matching bits, but there were bits to match... */ if (match == 0 && ppriv != 0) return (0); /* * Need to have permissions to the process, but don't... */ if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { return (0); } /* * Need to be in the same zone unless we possess the * privilege to examine all zones. */ if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { return (0); } } return (1); } /* * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which * consists of input pattern strings and an ops-vector to evaluate them. * This function returns >0 for match, 0 for no match, and <0 for error. */ static int dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, zoneid_t zoneid) { dtrace_provider_t *pvp = prp->dtpr_provider; int rv; if (pvp->dtpv_defunct) return (0); if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) return (rv); if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) return (rv); if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) return (rv); if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) return (rv); if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) return (0); return (rv); } /* * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) * interface for matching a glob pattern 'p' to an input string 's'. Unlike * libc's version, the kernel version only applies to 8-bit ASCII strings. * In addition, all of the recursion cases except for '*' matching have been * unwound. For '*', we still implement recursive evaluation, but a depth * counter is maintained and matching is aborted if we recurse too deep. * The function returns 0 if no match, >0 if match, and <0 if recursion error. 
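 *
 * Some illustrative evaluations:
 *
 *	dtrace_match_glob("read", "read*", 0)	returns 1 (match)
 *	dtrace_match_glob("read", "r??d", 0)	returns 1 (match)
 *	dtrace_match_glob("read", "[a-q]*", 0)	returns 0 (no match)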
*/ static int dtrace_match_glob(const char *s, const char *p, int depth) { const char *olds; char s1, c; int gs; if (depth > DTRACE_PROBEKEY_MAXDEPTH) return (-1); if (s == NULL) s = ""; /* treat NULL as empty string */ top: olds = s; s1 = *s++; if (p == NULL) return (0); if ((c = *p++) == '\0') return (s1 == '\0'); switch (c) { case '[': { int ok = 0, notflag = 0; char lc = '\0'; if (s1 == '\0') return (0); if (*p == '!') { notflag = 1; p++; } if ((c = *p++) == '\0') return (0); do { if (c == '-' && lc != '\0' && *p != ']') { if ((c = *p++) == '\0') return (0); if (c == '\\' && (c = *p++) == '\0') return (0); if (notflag) { if (s1 < lc || s1 > c) ok++; else return (0); } else if (lc <= s1 && s1 <= c) ok++; } else if (c == '\\' && (c = *p++) == '\0') return (0); lc = c; /* save left-hand 'c' for next iteration */ if (notflag) { if (s1 != c) ok++; else return (0); } else if (s1 == c) ok++; if ((c = *p++) == '\0') return (0); } while (c != ']'); if (ok) goto top; return (0); } case '\\': if ((c = *p++) == '\0') return (0); /*FALLTHRU*/ default: if (c != s1) return (0); /*FALLTHRU*/ case '?': if (s1 != '\0') goto top; return (0); case '*': while (*p == '*') p++; /* consecutive *'s are identical to a single one */ if (*p == '\0') return (1); for (s = olds; *s != '\0'; s++) { if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) return (gs); } return (0); } } /*ARGSUSED*/ static int dtrace_match_string(const char *s, const char *p, int depth) { return (s != NULL && strcmp(s, p) == 0); } /*ARGSUSED*/ static int dtrace_match_nul(const char *s, const char *p, int depth) { return (1); /* always match the empty pattern */ } /*ARGSUSED*/ static int dtrace_match_nonzero(const char *s, const char *p, int depth) { return (s != NULL && s[0] != '\0'); } static int dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) { dtrace_probe_t template, *probe; dtrace_hash_t *hash = NULL; int len, best = INT_MAX, nmatched = 0; dtrace_id_t i; ASSERT(MUTEX_HELD(&dtrace_lock)); /* * If the probe ID is specified in the key, just lookup by ID and * invoke the match callback once if a matching probe is found. */ if (pkp->dtpk_id != DTRACE_IDNONE) { if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { (void) (*matched)(probe, arg); nmatched++; } return (nmatched); } template.dtpr_mod = (char *)pkp->dtpk_mod; template.dtpr_func = (char *)pkp->dtpk_func; template.dtpr_name = (char *)pkp->dtpk_name; /* * We want to find the most distinct of the module name, function * name, and name. So for each one that is not a glob pattern or * empty string, we perform a lookup in the corresponding hash and * use the hash table with the fewest collisions to do our search. */ if (pkp->dtpk_mmatch == &dtrace_match_string && (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { best = len; hash = dtrace_bymod; } if (pkp->dtpk_fmatch == &dtrace_match_string && (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { best = len; hash = dtrace_byfunc; } if (pkp->dtpk_nmatch == &dtrace_match_string && (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { best = len; hash = dtrace_byname; } /* * If we did not select a hash table, iterate over every probe and * invoke our callback for each one that matches our input probe key. 
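 * (We get here only when none of the module, function or name elements
 * is a plain string -- e.g., for the fully-wildcarded description
 * "*:::" -- so the search necessarily visits all dtrace_nprobes
 * entries.)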
*/ if (hash == NULL) { for (i = 0; i < dtrace_nprobes; i++) { if ((probe = dtrace_probes[i]) == NULL || dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) continue; nmatched++; if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) break; } return (nmatched); } /* * If we selected a hash table, iterate over each probe of the same key * name and invoke the callback for every probe that matches the other * attributes of our input probe key. */ for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; probe = *(DTRACE_HASHNEXT(hash, probe))) { if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) continue; nmatched++; if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) break; } return (nmatched); } /* * Return the function pointer dtrace_match_probe() should use to compare the * specified pattern with a string. For NULL or empty patterns, we select * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). * For non-empty non-glob strings, we use dtrace_match_string(). */ static dtrace_probekey_f * dtrace_probekey_func(const char *p) { char c; if (p == NULL || *p == '\0') return (&dtrace_match_nul); while ((c = *p++) != '\0') { if (c == '[' || c == '?' || c == '*' || c == '\\') return (&dtrace_match_glob); } return (&dtrace_match_string); } /* * Build a probe comparison key for use with dtrace_match_probe() from the * given probe description. By convention, a null key only matches anchored * probes: if each field is the empty string, reset dtpk_fmatch to * dtrace_match_nonzero(). */ static void dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) { pkp->dtpk_prov = pdp->dtpd_provider; pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); pkp->dtpk_mod = pdp->dtpd_mod; pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); pkp->dtpk_func = pdp->dtpd_func; pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); pkp->dtpk_name = pdp->dtpd_name; pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); pkp->dtpk_id = pdp->dtpd_id; if (pkp->dtpk_id == DTRACE_IDNONE && pkp->dtpk_pmatch == &dtrace_match_nul && pkp->dtpk_mmatch == &dtrace_match_nul && pkp->dtpk_fmatch == &dtrace_match_nul && pkp->dtpk_nmatch == &dtrace_match_nul) pkp->dtpk_fmatch = &dtrace_match_nonzero; } /* * DTrace Provider-to-Framework API Functions * * These functions implement much of the Provider-to-Framework API, as * described in <sys/dtrace.h>. The parts of the API not in this section are * the functions in the API for probe management (found below), and * dtrace_probe() itself (found above). */ /* * Register the calling provider with the DTrace framework. This should * generally be called by DTrace providers in their attach(9E) entry point. */ int dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) { dtrace_provider_t *provider; if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { cmn_err(CE_WARN, "failed to register provider '%s': invalid " "arguments", name ? 
name : ""); return (EINVAL); } if (name[0] == '\0' || dtrace_badname(name)) { cmn_err(CE_WARN, "failed to register provider '%s': invalid " "provider name", name); return (EINVAL); } if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || pops->dtps_enable == NULL || pops->dtps_disable == NULL || pops->dtps_destroy == NULL || ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { cmn_err(CE_WARN, "failed to register provider '%s': invalid " "provider ops", name); return (EINVAL); } if (dtrace_badattr(&pap->dtpa_provider) || dtrace_badattr(&pap->dtpa_mod) || dtrace_badattr(&pap->dtpa_func) || dtrace_badattr(&pap->dtpa_name) || dtrace_badattr(&pap->dtpa_args)) { cmn_err(CE_WARN, "failed to register provider '%s': invalid " "provider attributes", name); return (EINVAL); } if (priv & ~DTRACE_PRIV_ALL) { cmn_err(CE_WARN, "failed to register provider '%s': invalid " "privilege attributes", name); return (EINVAL); } if ((priv & DTRACE_PRIV_KERNEL) && (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && pops->dtps_usermode == NULL) { cmn_err(CE_WARN, "failed to register provider '%s': need " "dtps_usermode() op for given privilege attributes", name); return (EINVAL); } provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); (void) strcpy(provider->dtpv_name, name); provider->dtpv_attr = *pap; provider->dtpv_priv.dtpp_flags = priv; if (cr != NULL) { provider->dtpv_priv.dtpp_uid = crgetuid(cr); provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); } provider->dtpv_pops = *pops; if (pops->dtps_provide == NULL) { ASSERT(pops->dtps_provide_module != NULL); provider->dtpv_pops.dtps_provide = (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; } if (pops->dtps_provide_module == NULL) { ASSERT(pops->dtps_provide != NULL); provider->dtpv_pops.dtps_provide_module = (void (*)(void *, modctl_t *))dtrace_nullop; } if (pops->dtps_suspend == NULL) { ASSERT(pops->dtps_resume == NULL); provider->dtpv_pops.dtps_suspend = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; provider->dtpv_pops.dtps_resume = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; } provider->dtpv_arg = arg; *idp = (dtrace_provider_id_t)provider; if (pops == &dtrace_provider_ops) { ASSERT(MUTEX_HELD(&dtrace_provider_lock)); ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(dtrace_anon.dta_enabling == NULL); /* * We make sure that the DTrace provider is at the head of * the provider chain. */ provider->dtpv_next = dtrace_provider; dtrace_provider = provider; return (0); } mutex_enter(&dtrace_provider_lock); mutex_enter(&dtrace_lock); /* * If there is at least one provider registered, we'll add this * provider after the first provider. */ if (dtrace_provider != NULL) { provider->dtpv_next = dtrace_provider->dtpv_next; dtrace_provider->dtpv_next = provider; } else { dtrace_provider = provider; } if (dtrace_retained != NULL) { dtrace_enabling_provide(provider); /* * Now we need to call dtrace_enabling_matchall() -- which * will acquire cpu_lock and dtrace_lock. We therefore need * to drop all of our locks before calling into it... */ mutex_exit(&dtrace_lock); mutex_exit(&dtrace_provider_lock); dtrace_enabling_matchall(); return (0); } mutex_exit(&dtrace_lock); mutex_exit(&dtrace_provider_lock); return (0); } /* * Unregister the specified provider from the DTrace framework. This should * generally be called by DTrace providers in their detach(9E) entry point. 
*/ int dtrace_unregister(dtrace_provider_id_t id) { dtrace_provider_t *old = (dtrace_provider_t *)id; dtrace_provider_t *prev = NULL; int i, self = 0, noreap = 0; dtrace_probe_t *probe, *first = NULL; if (old->dtpv_pops.dtps_enable == (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { /* * If DTrace itself is the provider, we're called with locks * already held. */ ASSERT(old == dtrace_provider); #ifdef illumos ASSERT(dtrace_devi != NULL); #endif ASSERT(MUTEX_HELD(&dtrace_provider_lock)); ASSERT(MUTEX_HELD(&dtrace_lock)); self = 1; if (dtrace_provider->dtpv_next != NULL) { /* * There's another provider here; return failure. */ return (EBUSY); } } else { mutex_enter(&dtrace_provider_lock); #ifdef illumos mutex_enter(&mod_lock); #endif mutex_enter(&dtrace_lock); } /* * If anyone has /dev/dtrace open, or if there are anonymous enabled * probes, we refuse to let providers slither away, unless this * provider has already been explicitly invalidated. */ if (!old->dtpv_defunct && (dtrace_opens || (dtrace_anon.dta_state != NULL && dtrace_anon.dta_state->dts_necbs > 0))) { if (!self) { mutex_exit(&dtrace_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); } return (EBUSY); } /* * Attempt to destroy the probes associated with this provider. */ for (i = 0; i < dtrace_nprobes; i++) { if ((probe = dtrace_probes[i]) == NULL) continue; if (probe->dtpr_provider != old) continue; if (probe->dtpr_ecb == NULL) continue; /* * If we are trying to unregister a defunct provider, and the * provider was made defunct within the interval dictated by * dtrace_unregister_defunct_reap, we'll (asynchronously) * attempt to reap our enablings. To denote that the provider * should reattempt to unregister itself at some point in the * future, we will return a differentiable error code (EAGAIN * instead of EBUSY) in this case. */ if (dtrace_gethrtime() - old->dtpv_defunct > dtrace_unregister_defunct_reap) noreap = 1; if (!self) { mutex_exit(&dtrace_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); } if (noreap) return (EBUSY); (void) taskq_dispatch(dtrace_taskq, (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); return (EAGAIN); } /* * All of the probes for this provider are disabled; we can safely * remove all of them from their hash chains and from the probe array. */ for (i = 0; i < dtrace_nprobes; i++) { if ((probe = dtrace_probes[i]) == NULL) continue; if (probe->dtpr_provider != old) continue; dtrace_probes[i] = NULL; dtrace_hash_remove(dtrace_bymod, probe); dtrace_hash_remove(dtrace_byfunc, probe); dtrace_hash_remove(dtrace_byname, probe); if (first == NULL) { first = probe; probe->dtpr_nextmod = NULL; } else { probe->dtpr_nextmod = first; first = probe; } } /* * The provider's probes have been removed from the hash chains and * from the probe array. Now issue a dtrace_sync() to be sure that * everyone has cleared out from any probe array processing. 
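 * (dtrace_sync() does not return until every CPU has passed through a
 * point at which it cannot be in probe context; only after that is it
 * safe to call the provider's dtps_destroy() and free the probes, as
 * done below.)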
*/ dtrace_sync(); for (probe = first; probe != NULL; probe = first) { first = probe->dtpr_nextmod; old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, probe->dtpr_arg); kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); #ifdef illumos vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); #else free_unr(dtrace_arena, probe->dtpr_id); #endif kmem_free(probe, sizeof (dtrace_probe_t)); } if ((prev = dtrace_provider) == old) { #ifdef illumos ASSERT(self || dtrace_devi == NULL); ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); #endif dtrace_provider = old->dtpv_next; } else { while (prev != NULL && prev->dtpv_next != old) prev = prev->dtpv_next; if (prev == NULL) { panic("attempt to unregister non-existent " "dtrace provider %p\n", (void *)id); } prev->dtpv_next = old->dtpv_next; } if (!self) { mutex_exit(&dtrace_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); } kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); kmem_free(old, sizeof (dtrace_provider_t)); return (0); } /* * Invalidate the specified provider. All subsequent probe lookups for the * specified provider will fail, but its probes will not be removed. */ void dtrace_invalidate(dtrace_provider_id_t id) { dtrace_provider_t *pvp = (dtrace_provider_t *)id; ASSERT(pvp->dtpv_pops.dtps_enable != (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); mutex_enter(&dtrace_provider_lock); mutex_enter(&dtrace_lock); pvp->dtpv_defunct = dtrace_gethrtime(); mutex_exit(&dtrace_lock); mutex_exit(&dtrace_provider_lock); } /* * Indicate whether or not DTrace has attached. */ int dtrace_attached(void) { /* * dtrace_provider will be non-NULL iff the DTrace driver has * attached. (It's non-NULL because DTrace is always itself a * provider.) */ return (dtrace_provider != NULL); } /* * Remove all the unenabled probes for the given provider. This function is * not unlike dtrace_unregister(), except that it doesn't remove the provider * -- just as many of its associated probes as it can. */ int dtrace_condense(dtrace_provider_id_t id) { dtrace_provider_t *prov = (dtrace_provider_t *)id; int i; dtrace_probe_t *probe; /* * Make sure this isn't the dtrace provider itself. */ ASSERT(prov->dtpv_pops.dtps_enable != (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); mutex_enter(&dtrace_provider_lock); mutex_enter(&dtrace_lock); /* * Attempt to destroy the probes associated with this provider. 
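 * (Unlike dtrace_unregister(), only unenabled probes -- those with no
 * ECBs -- are destroyed here; any enabled probe is simply skipped.)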
*/ for (i = 0; i < dtrace_nprobes; i++) { if ((probe = dtrace_probes[i]) == NULL) continue; if (probe->dtpr_provider != prov) continue; if (probe->dtpr_ecb != NULL) continue; dtrace_probes[i] = NULL; dtrace_hash_remove(dtrace_bymod, probe); dtrace_hash_remove(dtrace_byfunc, probe); dtrace_hash_remove(dtrace_byname, probe); prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, probe->dtpr_arg); kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); kmem_free(probe, sizeof (dtrace_probe_t)); #ifdef illumos vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); #else free_unr(dtrace_arena, i + 1); #endif } mutex_exit(&dtrace_lock); mutex_exit(&dtrace_provider_lock); return (0); } /* * DTrace Probe Management Functions * * The functions in this section perform the DTrace probe management, * including functions to create probes, look-up probes, and call into the * providers to request that probes be provided. Some of these functions are * in the Provider-to-Framework API; these functions can be identified by the * fact that they are not declared "static". */ /* * Create a probe with the specified module name, function name, and name. */ dtrace_id_t dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, const char *func, const char *name, int aframes, void *arg) { dtrace_probe_t *probe, **probes; dtrace_provider_t *provider = (dtrace_provider_t *)prov; dtrace_id_t id; if (provider == dtrace_provider) { ASSERT(MUTEX_HELD(&dtrace_lock)); } else { mutex_enter(&dtrace_lock); } #ifdef illumos id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, VM_BESTFIT | VM_SLEEP); #else id = alloc_unr(dtrace_arena); #endif probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); probe->dtpr_id = id; probe->dtpr_gen = dtrace_probegen++; probe->dtpr_mod = dtrace_strdup(mod); probe->dtpr_func = dtrace_strdup(func); probe->dtpr_name = dtrace_strdup(name); probe->dtpr_arg = arg; probe->dtpr_aframes = aframes; probe->dtpr_provider = provider; dtrace_hash_add(dtrace_bymod, probe); dtrace_hash_add(dtrace_byfunc, probe); dtrace_hash_add(dtrace_byname, probe); if (id - 1 >= dtrace_nprobes) { size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); size_t nsize = osize << 1; if (nsize == 0) { ASSERT(osize == 0); ASSERT(dtrace_probes == NULL); nsize = sizeof (dtrace_probe_t *); } probes = kmem_zalloc(nsize, KM_SLEEP); if (dtrace_probes == NULL) { ASSERT(osize == 0); dtrace_probes = probes; dtrace_nprobes = 1; } else { dtrace_probe_t **oprobes = dtrace_probes; bcopy(oprobes, probes, osize); dtrace_membar_producer(); dtrace_probes = probes; dtrace_sync(); /* * All CPUs are now seeing the new probes array; we can * safely free the old array. */ kmem_free(oprobes, osize); dtrace_nprobes <<= 1; } ASSERT(id - 1 < dtrace_nprobes); } ASSERT(dtrace_probes[id - 1] == NULL); dtrace_probes[id - 1] = probe; if (provider != dtrace_provider) mutex_exit(&dtrace_lock); return (id); } static dtrace_probe_t * dtrace_probe_lookup_id(dtrace_id_t id) { ASSERT(MUTEX_HELD(&dtrace_lock)); if (id == 0 || id > dtrace_nprobes) return (NULL); return (dtrace_probes[id - 1]); } static int dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) { *((dtrace_id_t *)arg) = probe->dtpr_id; return (DTRACE_MATCH_DONE); } /* * Look up a probe based on provider and one or more of module name, function * name and probe name. 
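 *
 * A common idiom (a sketch; "my_id" is a hypothetical provider
 * identifier) is to avoid re-creating a probe that already exists:
 *
 *	if (dtrace_probe_lookup(my_id, "mod", "func", "entry") == 0)
 *		(void) dtrace_probe_create(my_id, "mod", "func",
 *		    "entry", 0, NULL);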
*/ dtrace_id_t dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, char *func, char *name) { dtrace_probekey_t pkey; dtrace_id_t id; int match; pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; pkey.dtpk_pmatch = &dtrace_match_string; pkey.dtpk_mod = mod; pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; pkey.dtpk_func = func; pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; pkey.dtpk_name = name; pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; pkey.dtpk_id = DTRACE_IDNONE; mutex_enter(&dtrace_lock); match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, dtrace_probe_lookup_match, &id); mutex_exit(&dtrace_lock); ASSERT(match == 1 || match == 0); return (match ? id : 0); } /* * Returns the probe argument associated with the specified probe. */ void * dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) { dtrace_probe_t *probe; void *rval = NULL; mutex_enter(&dtrace_lock); if ((probe = dtrace_probe_lookup_id(pid)) != NULL && probe->dtpr_provider == (dtrace_provider_t *)id) rval = probe->dtpr_arg; mutex_exit(&dtrace_lock); return (rval); } /* * Copy a probe into a probe description. */ static void dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) { bzero(pdp, sizeof (dtrace_probedesc_t)); pdp->dtpd_id = prp->dtpr_id; (void) strncpy(pdp->dtpd_provider, prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); } /* * Called to indicate that a probe -- or probes -- should be provided by a * specified provider. If the specified description is NULL, the provider will * be told to provide all of its probes. (This is done whenever a new * consumer comes along, or whenever a retained enabling is to be matched.) If * the specified description is non-NULL, the provider is given the * opportunity to dynamically provide the specified probe, allowing providers * to support the creation of probes on-the-fly. (So-called _autocreated_ * probes.) If the provider is NULL, the operations will be applied to all * providers; if the provider is non-NULL the operations will only be applied * to the specified provider. The dtrace_provider_lock must be held, and the * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation * will need to grab the dtrace_lock when it reenters the framework through * dtrace_probe_lookup(), dtrace_probe_create(), etc. */ static void dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) { #ifdef illumos modctl_t *ctl; #endif int all = 0; ASSERT(MUTEX_HELD(&dtrace_provider_lock)); if (prv == NULL) { all = 1; prv = dtrace_provider; } do { /* * First, call the blanket provide operation. */ prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); #ifdef illumos /* * Now call the per-module provide operation. We will grab * mod_lock to prevent the list from being modified. Note * that this also prevents the mod_busy bits from changing. * (mod_busy can only be changed with mod_lock held.) 
*/ mutex_enter(&mod_lock); ctl = &modules; do { if (ctl->mod_busy || ctl->mod_mp == NULL) continue; prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); } while ((ctl = ctl->mod_next) != &modules); mutex_exit(&mod_lock); #endif } while (all && (prv = prv->dtpv_next) != NULL); } #ifdef illumos /* * Iterate over each probe, and call the Framework-to-Provider API function * denoted by offs. */ static void dtrace_probe_foreach(uintptr_t offs) { dtrace_provider_t *prov; void (*func)(void *, dtrace_id_t, void *); dtrace_probe_t *probe; dtrace_icookie_t cookie; int i; /* * We disable interrupts to walk through the probe array. This is * safe -- the dtrace_sync() in dtrace_unregister() assures that we * won't see stale data. */ cookie = dtrace_interrupt_disable(); for (i = 0; i < dtrace_nprobes; i++) { if ((probe = dtrace_probes[i]) == NULL) continue; if (probe->dtpr_ecb == NULL) { /* * This probe isn't enabled -- don't call the function. */ continue; } prov = probe->dtpr_provider; func = *((void(**)(void *, dtrace_id_t, void *)) ((uintptr_t)&prov->dtpv_pops + offs)); func(prov->dtpv_arg, i + 1, probe->dtpr_arg); } dtrace_interrupt_enable(cookie); } #endif static int dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) { dtrace_probekey_t pkey; uint32_t priv; uid_t uid; zoneid_t zoneid; ASSERT(MUTEX_HELD(&dtrace_lock)); dtrace_ecb_create_cache = NULL; if (desc == NULL) { /* * If we're passed a NULL description, we're being asked to * create an ECB with a NULL probe. */ (void) dtrace_ecb_create_enable(NULL, enab); return (0); } dtrace_probekey(desc, &pkey); dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, &priv, &uid, &zoneid); return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, enab)); } /* * DTrace Helper Provider Functions */ static void dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) { attr->dtat_name = DOF_ATTR_NAME(dofattr); attr->dtat_data = DOF_ATTR_DATA(dofattr); attr->dtat_class = DOF_ATTR_CLASS(dofattr); } static void dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, const dof_provider_t *dofprov, char *strtab) { hprov->dthpv_provname = strtab + dofprov->dofpv_name; dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, dofprov->dofpv_provattr); dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, dofprov->dofpv_modattr); dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, dofprov->dofpv_funcattr); dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, dofprov->dofpv_nameattr); dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, dofprov->dofpv_argsattr); } static void dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; dof_provider_t *provider; dof_probe_t *probe; uint32_t *off, *enoff; uint8_t *arg; char *strtab; uint_t i, nprobes; dtrace_helper_provdesc_t dhpv; dtrace_helper_probedesc_t dhpb; dtrace_meta_t *meta = dtrace_meta_pid; dtrace_mops_t *mops = &meta->dtm_mops; void *parg; provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + provider->dofpv_strtab * dof->dofh_secsize); prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + provider->dofpv_probes * dof->dofh_secsize); arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + provider->dofpv_prargs * dof->dofh_secsize); off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + provider->dofpv_proffs 
* dof->dofh_secsize); strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); enoff = NULL; /* * See dtrace_helper_provider_validate(). */ if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && provider->dofpv_prenoffs != DOF_SECT_NONE) { enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + provider->dofpv_prenoffs * dof->dofh_secsize); enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); } nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; /* * Create the provider. */ dtrace_dofprov2hprov(&dhpv, provider, strtab); if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) return; meta->dtm_count++; /* * Create the probes. */ for (i = 0; i < nprobes; i++) { probe = (dof_probe_t *)(uintptr_t)(daddr + prb_sec->dofs_offset + i * prb_sec->dofs_entsize); /* See the check in dtrace_helper_provider_validate(). */ if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) continue; dhpb.dthpb_mod = dhp->dofhp_mod; dhpb.dthpb_func = strtab + probe->dofpr_func; dhpb.dthpb_name = strtab + probe->dofpr_name; dhpb.dthpb_base = probe->dofpr_addr; dhpb.dthpb_offs = off + probe->dofpr_offidx; dhpb.dthpb_noffs = probe->dofpr_noffs; if (enoff != NULL) { dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; } else { dhpb.dthpb_enoffs = NULL; dhpb.dthpb_nenoffs = 0; } dhpb.dthpb_args = arg + probe->dofpr_argidx; dhpb.dthpb_nargc = probe->dofpr_nargc; dhpb.dthpb_xargc = probe->dofpr_xargc; dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); } } static void dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; int i; ASSERT(MUTEX_HELD(&dtrace_meta_lock)); for (i = 0; i < dof->dofh_secnum; i++) { dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + i * dof->dofh_secsize); if (sec->dofs_type != DOF_SECT_PROVIDER) continue; dtrace_helper_provide_one(dhp, sec, pid); } /* * We may have just created probes, so we must now rematch against * any retained enablings. Note that this call will acquire both * cpu_lock and dtrace_lock; the fact that we are holding * dtrace_meta_lock now is what defines the ordering with respect to * these three locks. */ dtrace_enabling_matchall(); } static void dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; dof_sec_t *str_sec; dof_provider_t *provider; char *strtab; dtrace_helper_provdesc_t dhpv; dtrace_meta_t *meta = dtrace_meta_pid; dtrace_mops_t *mops = &meta->dtm_mops; provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + provider->dofpv_strtab * dof->dofh_secsize); strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); /* * Create the provider. 
*/ dtrace_dofprov2hprov(&dhpv, provider, strtab); mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); meta->dtm_count--; } static void dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; int i; ASSERT(MUTEX_HELD(&dtrace_meta_lock)); for (i = 0; i < dof->dofh_secnum; i++) { dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + i * dof->dofh_secsize); if (sec->dofs_type != DOF_SECT_PROVIDER) continue; dtrace_helper_provider_remove_one(dhp, sec, pid); } } /* * DTrace Meta Provider-to-Framework API Functions * * These functions implement the Meta Provider-to-Framework API, as described * in <sys/dtrace.h>. */ int dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, dtrace_meta_provider_id_t *idp) { dtrace_meta_t *meta; dtrace_helpers_t *help, *next; int i; *idp = DTRACE_METAPROVNONE; /* * We strictly don't need the name, but we hold onto it for * debuggability. All hail error queues! */ if (name == NULL) { cmn_err(CE_WARN, "failed to register meta-provider: " "invalid name"); return (EINVAL); } if (mops == NULL || mops->dtms_create_probe == NULL || mops->dtms_provide_pid == NULL || mops->dtms_remove_pid == NULL) { cmn_err(CE_WARN, "failed to register meta-provider %s: " "invalid ops", name); return (EINVAL); } meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); meta->dtm_mops = *mops; meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); (void) strcpy(meta->dtm_name, name); meta->dtm_arg = arg; mutex_enter(&dtrace_meta_lock); mutex_enter(&dtrace_lock); if (dtrace_meta_pid != NULL) { mutex_exit(&dtrace_lock); mutex_exit(&dtrace_meta_lock); cmn_err(CE_WARN, "failed to register meta-provider %s: " "user-land meta-provider exists", name); kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); kmem_free(meta, sizeof (dtrace_meta_t)); return (EINVAL); } dtrace_meta_pid = meta; *idp = (dtrace_meta_provider_id_t)meta; /* * If there are providers and probes ready to go, pass them * off to the new meta provider now. */ help = dtrace_deferred_pid; dtrace_deferred_pid = NULL; mutex_exit(&dtrace_lock); while (help != NULL) { for (i = 0; i < help->dthps_nprovs; i++) { dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, help->dthps_pid); } next = help->dthps_next; help->dthps_next = NULL; help->dthps_prev = NULL; help->dthps_deferred = 0; help = next; } mutex_exit(&dtrace_meta_lock); return (0); } int dtrace_meta_unregister(dtrace_meta_provider_id_t id) { dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; mutex_enter(&dtrace_meta_lock); mutex_enter(&dtrace_lock); if (old == dtrace_meta_pid) { pp = &dtrace_meta_pid; } else { panic("attempt to unregister non-existent " "dtrace meta-provider %p\n", (void *)old); } if (old->dtm_count != 0) { mutex_exit(&dtrace_lock); mutex_exit(&dtrace_meta_lock); return (EBUSY); } *pp = NULL; mutex_exit(&dtrace_lock); mutex_exit(&dtrace_meta_lock); kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); kmem_free(old, sizeof (dtrace_meta_t)); return (0); } /* * DTrace DIF Object Functions */ static int dtrace_difo_err(uint_t pc, const char *format, ...) { if (dtrace_err_verbose) { va_list alist; (void) uprintf("dtrace DIF object error: [%u]: ", pc); va_start(alist, format); (void) vuprintf(format, alist); va_end(alist); } #ifdef DTRACE_ERRDEBUG dtrace_errdebug(format); #endif return (1); } /* * Validate a DTrace DIF object by checking the IR instructions. The following * rules are currently enforced by dtrace_difo_validate(): * * 1. 
Each instruction must have a valid opcode * 2. Each register, string, variable, or subroutine reference must be valid * 3. No instruction can modify register %r0 (must be zero) * 4. All instruction reserved bits must be set to zero * 5. The last instruction must be a "ret" instruction * 6. All branch targets must reference a valid instruction _after_ the branch */ static int dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, cred_t *cr) { int err = 0, i; int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; int kcheckload; uint_t pc; int maxglobal = -1, maxlocal = -1, maxtlocal = -1; kcheckload = cr == NULL || (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; dp->dtdo_destructive = 0; for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { dif_instr_t instr = dp->dtdo_buf[pc]; uint_t r1 = DIF_INSTR_R1(instr); uint_t r2 = DIF_INSTR_R2(instr); uint_t rd = DIF_INSTR_RD(instr); uint_t rs = DIF_INSTR_RS(instr); uint_t label = DIF_INSTR_LABEL(instr); uint_t v = DIF_INSTR_VAR(instr); uint_t subr = DIF_INSTR_SUBR(instr); uint_t type = DIF_INSTR_TYPE(instr); uint_t op = DIF_INSTR_OP(instr); switch (op) { case DIF_OP_OR: case DIF_OP_XOR: case DIF_OP_AND: case DIF_OP_SLL: case DIF_OP_SRL: case DIF_OP_SRA: case DIF_OP_SUB: case DIF_OP_ADD: case DIF_OP_MUL: case DIF_OP_SDIV: case DIF_OP_UDIV: case DIF_OP_SREM: case DIF_OP_UREM: case DIF_OP_COPYS: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 >= nregs) err += efunc(pc, "invalid register %u\n", r2); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_NOT: case DIF_OP_MOV: case DIF_OP_ALLOCS: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 != 0) err += efunc(pc, "non-zero reserved bits\n"); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_LDSB: case DIF_OP_LDSH: case DIF_OP_LDSW: case DIF_OP_LDUB: case DIF_OP_LDUH: case DIF_OP_LDUW: case DIF_OP_LDX: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 != 0) err += efunc(pc, "non-zero reserved bits\n"); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); if (kcheckload) dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); break; case DIF_OP_RLDSB: case DIF_OP_RLDSH: case DIF_OP_RLDSW: case DIF_OP_RLDUB: case DIF_OP_RLDUH: case DIF_OP_RLDUW: case DIF_OP_RLDX: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 != 0) err += efunc(pc, "non-zero reserved bits\n"); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_ULDSB: case DIF_OP_ULDSH: case DIF_OP_ULDSW: case DIF_OP_ULDUB: case DIF_OP_ULDUH: case DIF_OP_ULDUW: case DIF_OP_ULDX: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 != 0) err += efunc(pc, "non-zero reserved bits\n"); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_STB: case DIF_OP_STH: case DIF_OP_STW: case DIF_OP_STX: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 != 0) err += efunc(pc, "non-zero reserved bits\n"); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to 0 address\n"); break; case DIF_OP_CMP: case 
DIF_OP_SCMP: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 >= nregs) err += efunc(pc, "invalid register %u\n", r2); if (rd != 0) err += efunc(pc, "non-zero reserved bits\n"); break; case DIF_OP_TST: if (r1 >= nregs) err += efunc(pc, "invalid register %u\n", r1); if (r2 != 0 || rd != 0) err += efunc(pc, "non-zero reserved bits\n"); break; case DIF_OP_BA: case DIF_OP_BE: case DIF_OP_BNE: case DIF_OP_BG: case DIF_OP_BGU: case DIF_OP_BGE: case DIF_OP_BGEU: case DIF_OP_BL: case DIF_OP_BLU: case DIF_OP_BLE: case DIF_OP_BLEU: if (label >= dp->dtdo_len) { err += efunc(pc, "invalid branch target %u\n", label); } if (label <= pc) { err += efunc(pc, "backward branch to %u\n", label); } break; case DIF_OP_RET: if (r1 != 0 || r2 != 0) err += efunc(pc, "non-zero reserved bits\n"); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); break; case DIF_OP_NOP: case DIF_OP_POPTS: case DIF_OP_FLUSHTS: if (r1 != 0 || r2 != 0 || rd != 0) err += efunc(pc, "non-zero reserved bits\n"); break; case DIF_OP_SETX: if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { err += efunc(pc, "invalid integer ref %u\n", DIF_INSTR_INTEGER(instr)); } if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_SETS: if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { err += efunc(pc, "invalid string ref %u\n", DIF_INSTR_STRING(instr)); } if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_LDGA: case DIF_OP_LDTA: if (r1 > DIF_VAR_ARRAY_MAX) err += efunc(pc, "invalid array %u\n", r1); if (r2 >= nregs) err += efunc(pc, "invalid register %u\n", r2); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_LDGS: case DIF_OP_LDTS: case DIF_OP_LDLS: case DIF_OP_LDGAA: case DIF_OP_LDTAA: if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) err += efunc(pc, "invalid variable %u\n", v); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); break; case DIF_OP_STGS: case DIF_OP_STTS: case DIF_OP_STLS: case DIF_OP_STGAA: case DIF_OP_STTAA: if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) err += efunc(pc, "invalid variable %u\n", v); if (rs >= nregs) err += efunc(pc, "invalid register %u\n", rs); break; case DIF_OP_CALL: if (subr > DIF_SUBR_MAX) err += efunc(pc, "invalid subr %u\n", subr); if (rd >= nregs) err += efunc(pc, "invalid register %u\n", rd); if (rd == 0) err += efunc(pc, "cannot write to %r0\n"); if (subr == DIF_SUBR_COPYOUT || subr == DIF_SUBR_COPYOUTSTR) { dp->dtdo_destructive = 1; } if (subr == DIF_SUBR_GETF) { /* * If we have a getf() we need to record that * in our state. Note that our state can be * NULL if this is a helper -- but in that * case, the call to getf() is itself illegal, * and will be caught (slightly later) when * the helper is validated. 
*/ if (vstate->dtvs_state != NULL) vstate->dtvs_state->dts_getf++; } break; case DIF_OP_PUSHTR: if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) err += efunc(pc, "invalid ref type %u\n", type); if (r2 >= nregs) err += efunc(pc, "invalid register %u\n", r2); if (rs >= nregs) err += efunc(pc, "invalid register %u\n", rs); break; case DIF_OP_PUSHTV: if (type != DIF_TYPE_CTF) err += efunc(pc, "invalid val type %u\n", type); if (r2 >= nregs) err += efunc(pc, "invalid register %u\n", r2); if (rs >= nregs) err += efunc(pc, "invalid register %u\n", rs); break; default: err += efunc(pc, "invalid opcode %u\n", DIF_INSTR_OP(instr)); } } if (dp->dtdo_len != 0 && DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { err += efunc(dp->dtdo_len - 1, "expected 'ret' as last DIF instruction\n"); } if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) { /* * If we're not returning by reference, the size must be either * 0 or the size of one of the base types. */ switch (dp->dtdo_rtype.dtdt_size) { case 0: case sizeof (uint8_t): case sizeof (uint16_t): case sizeof (uint32_t): case sizeof (uint64_t): break; default: err += efunc(dp->dtdo_len - 1, "bad return size\n"); } } for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; dtrace_diftype_t *vt, *et; uint_t id, ndx; if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && v->dtdv_scope != DIFV_SCOPE_THREAD && v->dtdv_scope != DIFV_SCOPE_LOCAL) { err += efunc(i, "unrecognized variable scope %d\n", v->dtdv_scope); break; } if (v->dtdv_kind != DIFV_KIND_ARRAY && v->dtdv_kind != DIFV_KIND_SCALAR) { err += efunc(i, "unrecognized variable type %d\n", v->dtdv_kind); break; } if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { err += efunc(i, "%d exceeds variable id limit\n", id); break; } if (id < DIF_VAR_OTHER_UBASE) continue; /* * For user-defined variables, we need to check that this * definition is identical to any previous definition that we * encountered. 
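 * (For example, if one enabling defined a global "x" as a 4-byte scalar
 * and a later enabling defines it with a by-ref string type, the kind,
 * flag or size comparisons below will fail and the DIFO will be
 * rejected.)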
*/ ndx = id - DIF_VAR_OTHER_UBASE; switch (v->dtdv_scope) { case DIFV_SCOPE_GLOBAL: if (maxglobal == -1 || ndx > maxglobal) maxglobal = ndx; if (ndx < vstate->dtvs_nglobals) { dtrace_statvar_t *svar; if ((svar = vstate->dtvs_globals[ndx]) != NULL) existing = &svar->dtsv_var; } break; case DIFV_SCOPE_THREAD: if (maxtlocal == -1 || ndx > maxtlocal) maxtlocal = ndx; if (ndx < vstate->dtvs_ntlocals) existing = &vstate->dtvs_tlocals[ndx]; break; case DIFV_SCOPE_LOCAL: if (maxlocal == -1 || ndx > maxlocal) maxlocal = ndx; if (ndx < vstate->dtvs_nlocals) { dtrace_statvar_t *svar; if ((svar = vstate->dtvs_locals[ndx]) != NULL) existing = &svar->dtsv_var; } break; } vt = &v->dtdv_type; if (vt->dtdt_flags & DIF_TF_BYREF) { if (vt->dtdt_size == 0) { err += efunc(i, "zero-sized variable\n"); break; } if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL || v->dtdv_scope == DIFV_SCOPE_LOCAL) && vt->dtdt_size > dtrace_statvar_maxsize) { err += efunc(i, "oversized by-ref static\n"); break; } } if (existing == NULL || existing->dtdv_id == 0) continue; ASSERT(existing->dtdv_id == v->dtdv_id); ASSERT(existing->dtdv_scope == v->dtdv_scope); if (existing->dtdv_kind != v->dtdv_kind) err += efunc(i, "%d changed variable kind\n", id); et = &existing->dtdv_type; if (vt->dtdt_flags != et->dtdt_flags) { err += efunc(i, "%d changed variable type flags\n", id); break; } if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { err += efunc(i, "%d changed variable type size\n", id); break; } } for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { dif_instr_t instr = dp->dtdo_buf[pc]; uint_t v = DIF_INSTR_VAR(instr); uint_t op = DIF_INSTR_OP(instr); switch (op) { case DIF_OP_LDGS: case DIF_OP_LDGAA: case DIF_OP_STGS: case DIF_OP_STGAA: if (v > DIF_VAR_OTHER_UBASE + maxglobal) err += efunc(pc, "invalid variable %u\n", v); break; case DIF_OP_LDTS: case DIF_OP_LDTAA: case DIF_OP_STTS: case DIF_OP_STTAA: if (v > DIF_VAR_OTHER_UBASE + maxtlocal) err += efunc(pc, "invalid variable %u\n", v); break; case DIF_OP_LDLS: case DIF_OP_STLS: if (v > DIF_VAR_OTHER_UBASE + maxlocal) err += efunc(pc, "invalid variable %u\n", v); break; default: break; } } return (err); } /* * Validate a DTrace DIF object that it is to be used as a helper. Helpers * are much more constrained than normal DIFOs. Specifically, they may * not: * * 1. Make calls to subroutines other than copyin(), copyinstr() or * miscellaneous string routines * 2. Access DTrace variables other than the args[] array, and the * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. * 3. Have thread-local variables. * 4. Have dynamic variables. */ static int dtrace_difo_validate_helper(dtrace_difo_t *dp) { int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; int err = 0; uint_t pc; for (pc = 0; pc < dp->dtdo_len; pc++) { dif_instr_t instr = dp->dtdo_buf[pc]; uint_t v = DIF_INSTR_VAR(instr); uint_t subr = DIF_INSTR_SUBR(instr); uint_t op = DIF_INSTR_OP(instr); switch (op) { case DIF_OP_OR: case DIF_OP_XOR: case DIF_OP_AND: case DIF_OP_SLL: case DIF_OP_SRL: case DIF_OP_SRA: case DIF_OP_SUB: case DIF_OP_ADD: case DIF_OP_MUL: case DIF_OP_SDIV: case DIF_OP_UDIV: case DIF_OP_SREM: case DIF_OP_UREM: case DIF_OP_COPYS: case DIF_OP_NOT: case DIF_OP_MOV: case DIF_OP_RLDSB: case DIF_OP_RLDSH: case DIF_OP_RLDSW: case DIF_OP_RLDUB: case DIF_OP_RLDUH: case DIF_OP_RLDUW: case DIF_OP_RLDX: case DIF_OP_ULDSB: case DIF_OP_ULDSH: case DIF_OP_ULDSW: case DIF_OP_ULDUB: case DIF_OP_ULDUH: case DIF_OP_ULDUW: case DIF_OP_ULDX: case DIF_OP_STB: case DIF_OP_STH: case DIF_OP_STW: case DIF_OP_STX: case DIF_OP_ALLOCS: case DIF_OP_CMP: case DIF_OP_SCMP: case DIF_OP_TST: case DIF_OP_BA: case DIF_OP_BE: case DIF_OP_BNE: case DIF_OP_BG: case DIF_OP_BGU: case DIF_OP_BGE: case DIF_OP_BGEU: case DIF_OP_BL: case DIF_OP_BLU: case DIF_OP_BLE: case DIF_OP_BLEU: case DIF_OP_RET: case DIF_OP_NOP: case DIF_OP_POPTS: case DIF_OP_FLUSHTS: case DIF_OP_SETX: case DIF_OP_SETS: case DIF_OP_LDGA: case DIF_OP_LDLS: case DIF_OP_STGS: case DIF_OP_STLS: case DIF_OP_PUSHTR: case DIF_OP_PUSHTV: break; case DIF_OP_LDGS: if (v >= DIF_VAR_OTHER_UBASE) break; if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) break; if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || v == DIF_VAR_PPID || v == DIF_VAR_TID || v == DIF_VAR_EXECARGS || v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || v == DIF_VAR_UID || v == DIF_VAR_GID) break; err += efunc(pc, "illegal variable %u\n", v); break; case DIF_OP_LDTA: case DIF_OP_LDTS: case DIF_OP_LDGAA: case DIF_OP_LDTAA: err += efunc(pc, "illegal dynamic variable load\n"); break; case DIF_OP_STTS: case DIF_OP_STGAA: case DIF_OP_STTAA: err += efunc(pc, "illegal dynamic variable store\n"); break; case DIF_OP_CALL: if (subr == DIF_SUBR_ALLOCA || subr == DIF_SUBR_BCOPY || subr == DIF_SUBR_COPYIN || subr == DIF_SUBR_COPYINTO || subr == DIF_SUBR_COPYINSTR || subr == DIF_SUBR_INDEX || subr == DIF_SUBR_INET_NTOA || subr == DIF_SUBR_INET_NTOA6 || subr == DIF_SUBR_INET_NTOP || subr == DIF_SUBR_JSON || subr == DIF_SUBR_LLTOSTR || subr == DIF_SUBR_STRTOLL || subr == DIF_SUBR_RINDEX || subr == DIF_SUBR_STRCHR || subr == DIF_SUBR_STRJOIN || subr == DIF_SUBR_STRRCHR || subr == DIF_SUBR_STRSTR || subr == DIF_SUBR_HTONS || subr == DIF_SUBR_HTONL || subr == DIF_SUBR_HTONLL || subr == DIF_SUBR_NTOHS || subr == DIF_SUBR_NTOHL || subr == DIF_SUBR_NTOHLL || subr == DIF_SUBR_MEMREF) break; #ifdef __FreeBSD__ if (subr == DIF_SUBR_MEMSTR) break; #endif err += efunc(pc, "invalid subr %u\n", subr); break; default: err += efunc(pc, "invalid opcode %u\n", DIF_INSTR_OP(instr)); } } return (err); } /* * Returns 1 if the expression in the DIF object can be cached on a per-thread * basis; 0 if not. */ static int dtrace_difo_cacheable(dtrace_difo_t *dp) { int i; if (dp == NULL) return (0); for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) continue; switch (v->dtdv_id) { case DIF_VAR_CURTHREAD: case DIF_VAR_PID: case DIF_VAR_TID: case DIF_VAR_EXECARGS: case DIF_VAR_EXECNAME: case DIF_VAR_ZONENAME: break; default: return (0); } } /* * This DIF object may be cacheable. Now we need to look for any * array loading instructions, any memory loading instructions, or * any stores to thread-local variables. 
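 * (Any of these may observe state that legitimately changes between two
 * firings in the same thread, so their presence makes the expression
 * unsafe to cache on a per-thread basis.)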
*/ for (i = 0; i < dp->dtdo_len; i++) { uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || op == DIF_OP_LDGA || op == DIF_OP_STTS) return (0); } return (1); } static void dtrace_difo_hold(dtrace_difo_t *dp) { int i; ASSERT(MUTEX_HELD(&dtrace_lock)); dp->dtdo_refcnt++; ASSERT(dp->dtdo_refcnt != 0); /* * We need to check this DIF object for references to the variable * DIF_VAR_VTIMESTAMP. */ for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; if (v->dtdv_id != DIF_VAR_VTIMESTAMP) continue; if (dtrace_vtime_references++ == 0) dtrace_vtime_enable(); } } /* * This routine calculates the dynamic variable chunksize for a given DIF * object. The calculation is not fool-proof, and can probably be tricked by * malicious DIF -- but it works for all compiler-generated DIF. Because this * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail * if a dynamic variable size exceeds the chunksize. */ static void dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { uint64_t sval = 0; dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ const dif_instr_t *text = dp->dtdo_buf; uint_t pc, srd = 0; uint_t ttop = 0; size_t size, ksize; uint_t id, i; for (pc = 0; pc < dp->dtdo_len; pc++) { dif_instr_t instr = text[pc]; uint_t op = DIF_INSTR_OP(instr); uint_t rd = DIF_INSTR_RD(instr); uint_t r1 = DIF_INSTR_R1(instr); uint_t nkeys = 0; uchar_t scope = 0; dtrace_key_t *key = tupregs; switch (op) { case DIF_OP_SETX: sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; srd = rd; continue; case DIF_OP_STTS: key = &tupregs[DIF_DTR_NREGS]; key[0].dttk_size = 0; key[1].dttk_size = 0; nkeys = 2; scope = DIFV_SCOPE_THREAD; break; case DIF_OP_STGAA: case DIF_OP_STTAA: nkeys = ttop; if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) key[nkeys++].dttk_size = 0; key[nkeys++].dttk_size = 0; if (op == DIF_OP_STTAA) { scope = DIFV_SCOPE_THREAD; } else { scope = DIFV_SCOPE_GLOBAL; } break; case DIF_OP_PUSHTR: if (ttop == DIF_DTR_NREGS) return; if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { /* * If the register for the size of the "pushtr" * is %r0 (or the value is 0) and the type is * a string, we'll use the system-wide default * string size. */ tupregs[ttop++].dttk_size = dtrace_strsize_default; } else { if (srd == 0) return; if (sval > LONG_MAX) return; tupregs[ttop++].dttk_size = sval; } break; case DIF_OP_PUSHTV: if (ttop == DIF_DTR_NREGS) return; tupregs[ttop++].dttk_size = 0; break; case DIF_OP_FLUSHTS: ttop = 0; break; case DIF_OP_POPTS: if (ttop != 0) ttop--; break; } sval = 0; srd = 0; if (nkeys == 0) continue; /* * We have a dynamic variable allocation; calculate its size. */ for (ksize = 0, i = 0; i < nkeys; i++) ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); size = sizeof (dtrace_dynvar_t); size += sizeof (dtrace_key_t) * (nkeys - 1); size += ksize; /* * Now we need to determine the size of the stored data. */ id = DIF_INSTR_VAR(instr); for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; if (v->dtdv_id == id && v->dtdv_scope == scope) { size += v->dtdv_type.dtdt_size; break; } } if (i == dp->dtdo_varlen) return; /* * We have the size. If this is larger than the chunk size * for our dynamic variable state, reset the chunk size. */ size = P2ROUNDUP(size, sizeof (uint64_t)); /* * Before setting the chunk size, check that we're not going * to set it to a negative value... 
*/ if (size > LONG_MAX) return; /* * ...and make certain that we didn't badly overflow. */ if (size < ksize || size < sizeof (dtrace_dynvar_t)) return; if (size > vstate->dtvs_dynvars.dtds_chunksize) vstate->dtvs_dynvars.dtds_chunksize = size; } } static void dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { int i, oldsvars, osz, nsz, otlocals, ntlocals; uint_t id; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; dtrace_statvar_t *svar, ***svarp = NULL; size_t dsize = 0; uint8_t scope = v->dtdv_scope; int *np = NULL; if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) continue; id -= DIF_VAR_OTHER_UBASE; switch (scope) { case DIFV_SCOPE_THREAD: while (id >= (otlocals = vstate->dtvs_ntlocals)) { dtrace_difv_t *tlocals; if ((ntlocals = (otlocals << 1)) == 0) ntlocals = 1; osz = otlocals * sizeof (dtrace_difv_t); nsz = ntlocals * sizeof (dtrace_difv_t); tlocals = kmem_zalloc(nsz, KM_SLEEP); if (osz != 0) { bcopy(vstate->dtvs_tlocals, tlocals, osz); kmem_free(vstate->dtvs_tlocals, osz); } vstate->dtvs_tlocals = tlocals; vstate->dtvs_ntlocals = ntlocals; } vstate->dtvs_tlocals[id] = *v; continue; case DIFV_SCOPE_LOCAL: np = &vstate->dtvs_nlocals; svarp = &vstate->dtvs_locals; if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) dsize = NCPU * (v->dtdv_type.dtdt_size + sizeof (uint64_t)); else dsize = NCPU * sizeof (uint64_t); break; case DIFV_SCOPE_GLOBAL: np = &vstate->dtvs_nglobals; svarp = &vstate->dtvs_globals; if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) dsize = v->dtdv_type.dtdt_size + sizeof (uint64_t); break; default: ASSERT(0); } while (id >= (oldsvars = *np)) { dtrace_statvar_t **statics; int newsvars, oldsize, newsize; if ((newsvars = (oldsvars << 1)) == 0) newsvars = 1; oldsize = oldsvars * sizeof (dtrace_statvar_t *); newsize = newsvars * sizeof (dtrace_statvar_t *); statics = kmem_zalloc(newsize, KM_SLEEP); if (oldsize != 0) { bcopy(*svarp, statics, oldsize); kmem_free(*svarp, oldsize); } *svarp = statics; *np = newsvars; } if ((svar = (*svarp)[id]) == NULL) { svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); svar->dtsv_var = *v; if ((svar->dtsv_size = dsize) != 0) { svar->dtsv_data = (uint64_t)(uintptr_t) kmem_zalloc(dsize, KM_SLEEP); } (*svarp)[id] = svar; } svar->dtsv_refcnt++; } dtrace_difo_chunksize(dp, vstate); dtrace_difo_hold(dp); } static dtrace_difo_t * dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { dtrace_difo_t *new; size_t sz; ASSERT(dp->dtdo_buf != NULL); ASSERT(dp->dtdo_refcnt != 0); new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); ASSERT(dp->dtdo_buf != NULL); sz = dp->dtdo_len * sizeof (dif_instr_t); new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); bcopy(dp->dtdo_buf, new->dtdo_buf, sz); new->dtdo_len = dp->dtdo_len; if (dp->dtdo_strtab != NULL) { ASSERT(dp->dtdo_strlen != 0); new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); new->dtdo_strlen = dp->dtdo_strlen; } if (dp->dtdo_inttab != NULL) { ASSERT(dp->dtdo_intlen != 0); sz = dp->dtdo_intlen * sizeof (uint64_t); new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); new->dtdo_intlen = dp->dtdo_intlen; } if (dp->dtdo_vartab != NULL) { ASSERT(dp->dtdo_varlen != 0); sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); new->dtdo_varlen = dp->dtdo_varlen; } dtrace_difo_init(new, vstate); return 
(new); } static void dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { int i; ASSERT(dp->dtdo_refcnt == 0); for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; dtrace_statvar_t *svar, **svarp = NULL; uint_t id; uint8_t scope = v->dtdv_scope; int *np = NULL; switch (scope) { case DIFV_SCOPE_THREAD: continue; case DIFV_SCOPE_LOCAL: np = &vstate->dtvs_nlocals; svarp = vstate->dtvs_locals; break; case DIFV_SCOPE_GLOBAL: np = &vstate->dtvs_nglobals; svarp = vstate->dtvs_globals; break; default: ASSERT(0); } if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) continue; id -= DIF_VAR_OTHER_UBASE; ASSERT(id < *np); svar = svarp[id]; ASSERT(svar != NULL); ASSERT(svar->dtsv_refcnt > 0); if (--svar->dtsv_refcnt > 0) continue; if (svar->dtsv_size != 0) { ASSERT(svar->dtsv_data != 0); kmem_free((void *)(uintptr_t)svar->dtsv_data, svar->dtsv_size); } kmem_free(svar, sizeof (dtrace_statvar_t)); svarp[id] = NULL; } if (dp->dtdo_buf != NULL) kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); if (dp->dtdo_inttab != NULL) kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); if (dp->dtdo_strtab != NULL) kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); if (dp->dtdo_vartab != NULL) kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); kmem_free(dp, sizeof (dtrace_difo_t)); } static void dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) { int i; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(dp->dtdo_refcnt != 0); for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; if (v->dtdv_id != DIF_VAR_VTIMESTAMP) continue; ASSERT(dtrace_vtime_references > 0); if (--dtrace_vtime_references == 0) dtrace_vtime_disable(); } if (--dp->dtdo_refcnt == 0) dtrace_difo_destroy(dp, vstate); } /* * DTrace Format Functions */ static uint16_t dtrace_format_add(dtrace_state_t *state, char *str) { char *fmt, **new; uint16_t ndx, len = strlen(str) + 1; fmt = kmem_zalloc(len, KM_SLEEP); bcopy(str, fmt, len); for (ndx = 0; ndx < state->dts_nformats; ndx++) { if (state->dts_formats[ndx] == NULL) { state->dts_formats[ndx] = fmt; return (ndx + 1); } } if (state->dts_nformats == USHRT_MAX) { /* * This is only likely if a denial-of-service attack is being * attempted. As such, it's okay to fail silently here. */ kmem_free(fmt, len); return (0); } /* * For simplicity, we always resize the formats array to be exactly the * number of formats. 
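 *
 * Note that the handle returned to callers is 1-based (ndx + 1): handle 0
 * is reserved to mean "no format", which is why dtrace_format_remove() and
 * the consumers of dtrd_format index dts_formats with (format - 1). The
 * first format added to a state, for example, lives in dts_formats[0] and
 * travels as format 1.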
*/ ndx = state->dts_nformats++; new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); if (state->dts_formats != NULL) { ASSERT(ndx != 0); bcopy(state->dts_formats, new, ndx * sizeof (char *)); kmem_free(state->dts_formats, ndx * sizeof (char *)); } state->dts_formats = new; state->dts_formats[ndx] = fmt; return (ndx + 1); } static void dtrace_format_remove(dtrace_state_t *state, uint16_t format) { char *fmt; ASSERT(state->dts_formats != NULL); ASSERT(format <= state->dts_nformats); ASSERT(state->dts_formats[format - 1] != NULL); fmt = state->dts_formats[format - 1]; kmem_free(fmt, strlen(fmt) + 1); state->dts_formats[format - 1] = NULL; } static void dtrace_format_destroy(dtrace_state_t *state) { int i; if (state->dts_nformats == 0) { ASSERT(state->dts_formats == NULL); return; } ASSERT(state->dts_formats != NULL); for (i = 0; i < state->dts_nformats; i++) { char *fmt = state->dts_formats[i]; if (fmt == NULL) continue; kmem_free(fmt, strlen(fmt) + 1); } kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); state->dts_nformats = 0; state->dts_formats = NULL; } /* * DTrace Predicate Functions */ static dtrace_predicate_t * dtrace_predicate_create(dtrace_difo_t *dp) { dtrace_predicate_t *pred; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(dp->dtdo_refcnt != 0); pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); pred->dtp_difo = dp; pred->dtp_refcnt = 1; if (!dtrace_difo_cacheable(dp)) return (pred); if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { /* * This is only theoretically possible -- we have had 2^32 * cacheable predicates on this machine. We cannot allow any * more predicates to become cacheable: as unlikely as it is, * there may be a thread caching a (now stale) predicate cache * ID. (N.B.: the temptation is being successfully resisted to * have this cmn_err() "Holy shit -- we executed this code!") */ return (pred); } pred->dtp_cacheid = dtrace_predcache_id++; return (pred); } static void dtrace_predicate_hold(dtrace_predicate_t *pred) { ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); ASSERT(pred->dtp_refcnt > 0); pred->dtp_refcnt++; } static void dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) { dtrace_difo_t *dp = pred->dtp_difo; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(dp != NULL && dp->dtdo_refcnt != 0); ASSERT(pred->dtp_refcnt > 0); if (--pred->dtp_refcnt == 0) { dtrace_difo_release(pred->dtp_difo, vstate); kmem_free(pred, sizeof (dtrace_predicate_t)); } } /* * DTrace Action Description Functions */ static dtrace_actdesc_t * dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, uint64_t uarg, uint64_t arg) { dtrace_actdesc_t *act; #ifdef illumos ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); #endif act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); act->dtad_kind = kind; act->dtad_ntuple = ntuple; act->dtad_uarg = uarg; act->dtad_arg = arg; act->dtad_refcnt = 1; return (act); } static void dtrace_actdesc_hold(dtrace_actdesc_t *act) { ASSERT(act->dtad_refcnt >= 1); act->dtad_refcnt++; } static void dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) { dtrace_actkind_t kind = act->dtad_kind; dtrace_difo_t *dp; ASSERT(act->dtad_refcnt >= 1); if (--act->dtad_refcnt != 0) return; if ((dp = act->dtad_difo) != NULL) dtrace_difo_release(dp, vstate); if (DTRACEACT_ISPRINTFLIKE(kind)) { char *str = (char *)(uintptr_t)act->dtad_arg; #ifdef illumos ASSERT((str != NULL && 
(uintptr_t)str >= KERNELBASE) || (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); #endif if (str != NULL) kmem_free(str, strlen(str) + 1); } kmem_free(act, sizeof (dtrace_actdesc_t)); } /* * DTrace ECB Functions */ static dtrace_ecb_t * dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) { dtrace_ecb_t *ecb; dtrace_epid_t epid; ASSERT(MUTEX_HELD(&dtrace_lock)); ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); ecb->dte_predicate = NULL; ecb->dte_probe = probe; /* * The default size is the size of the default action: recording * the header. */ ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); ecb->dte_alignment = sizeof (dtrace_epid_t); epid = state->dts_epid++; if (epid - 1 >= state->dts_necbs) { dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; int necbs = state->dts_necbs << 1; ASSERT(epid == state->dts_necbs + 1); if (necbs == 0) { ASSERT(oecbs == NULL); necbs = 1; } ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); if (oecbs != NULL) bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); dtrace_membar_producer(); state->dts_ecbs = ecbs; if (oecbs != NULL) { /* * If this state is active, we must dtrace_sync() * before we can free the old dts_ecbs array: we're * coming in hot, and there may be active ring * buffer processing (which indexes into the dts_ecbs * array) on another CPU. */ if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) dtrace_sync(); kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); } dtrace_membar_producer(); state->dts_necbs = necbs; } ecb->dte_state = state; ASSERT(state->dts_ecbs[epid - 1] == NULL); dtrace_membar_producer(); state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; return (ecb); } static void dtrace_ecb_enable(dtrace_ecb_t *ecb) { dtrace_probe_t *probe = ecb->dte_probe; ASSERT(MUTEX_HELD(&cpu_lock)); ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(ecb->dte_next == NULL); if (probe == NULL) { /* * This is the NULL probe -- there's nothing to do. */ return; } if (probe->dtpr_ecb == NULL) { dtrace_provider_t *prov = probe->dtpr_provider; /* * We're the first ECB on this probe. */ probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; if (ecb->dte_predicate != NULL) probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; prov->dtpv_pops.dtps_enable(prov->dtpv_arg, probe->dtpr_id, probe->dtpr_arg); } else { /* * This probe is already active. Swing the last pointer to * point to the new ECB, and issue a dtrace_sync() to assure * that all CPUs have seen the change. */ ASSERT(probe->dtpr_ecb_last != NULL); probe->dtpr_ecb_last->dte_next = ecb; probe->dtpr_ecb_last = ecb; probe->dtpr_predcache = 0; dtrace_sync(); } } static int dtrace_ecb_resize(dtrace_ecb_t *ecb) { dtrace_action_t *act; uint32_t curneeded = UINT32_MAX; uint32_t aggbase = UINT32_MAX; /* * If we record anything, we always record the dtrace_rechdr_t. (And * we always record it first.) 
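 *
 * The layout arithmetic below leans on two illumos macros: P2ROUNDUP(x, a)
 * rounds x up to the next multiple of the power-of-two a (P2ROUNDUP(12, 8)
 * == 16), and P2PHASEUP(x, a, phase) yields the smallest value >= x that
 * is congruent to phase modulo a (P2PHASEUP(21, 8, 4) == 28); the latter
 * is what carves out the aggregation ID slot just ahead of a tuple's key
 * records.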
*/ ecb->dte_size = sizeof (dtrace_rechdr_t); ecb->dte_alignment = sizeof (dtrace_epid_t); for (act = ecb->dte_action; act != NULL; act = act->dta_next) { dtrace_recdesc_t *rec = &act->dta_rec; ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); ecb->dte_alignment = MAX(ecb->dte_alignment, rec->dtrd_alignment); if (DTRACEACT_ISAGG(act->dta_kind)) { dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; ASSERT(rec->dtrd_size != 0); ASSERT(agg->dtag_first != NULL); ASSERT(act->dta_prev->dta_intuple); ASSERT(aggbase != UINT32_MAX); ASSERT(curneeded != UINT32_MAX); agg->dtag_base = aggbase; curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); rec->dtrd_offset = curneeded; if (curneeded + rec->dtrd_size < curneeded) return (EINVAL); curneeded += rec->dtrd_size; ecb->dte_needed = MAX(ecb->dte_needed, curneeded); aggbase = UINT32_MAX; curneeded = UINT32_MAX; } else if (act->dta_intuple) { if (curneeded == UINT32_MAX) { /* * This is the first record in a tuple. Align * curneeded to be at offset 4 in an 8-byte * aligned block. */ ASSERT(act->dta_prev == NULL || !act->dta_prev->dta_intuple); ASSERT3U(aggbase, ==, UINT32_MAX); curneeded = P2PHASEUP(ecb->dte_size, sizeof (uint64_t), sizeof (dtrace_aggid_t)); aggbase = curneeded - sizeof (dtrace_aggid_t); ASSERT(IS_P2ALIGNED(aggbase, sizeof (uint64_t))); } curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); rec->dtrd_offset = curneeded; if (curneeded + rec->dtrd_size < curneeded) return (EINVAL); curneeded += rec->dtrd_size; } else { /* tuples must be followed by an aggregation */ ASSERT(act->dta_prev == NULL || !act->dta_prev->dta_intuple); ecb->dte_size = P2ROUNDUP(ecb->dte_size, rec->dtrd_alignment); rec->dtrd_offset = ecb->dte_size; if (ecb->dte_size + rec->dtrd_size < ecb->dte_size) return (EINVAL); ecb->dte_size += rec->dtrd_size; ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); } } if ((act = ecb->dte_action) != NULL && !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && ecb->dte_size == sizeof (dtrace_rechdr_t)) { /* * If the size is still sizeof (dtrace_rechdr_t), then all * actions store no data; set the size to 0. 
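 * (Whatever size survives is then rounded below: both dte_size and
 * dte_needed are brought up to dtrace_epid_t alignment so that
 * back-to-back records remain properly aligned in the principal buffer.)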
*/ ecb->dte_size = 0; } ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, ecb->dte_needed); return (0); } static dtrace_action_t * dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) { dtrace_aggregation_t *agg; size_t size = sizeof (uint64_t); int ntuple = desc->dtad_ntuple; dtrace_action_t *act; dtrace_recdesc_t *frec; dtrace_aggid_t aggid; dtrace_state_t *state = ecb->dte_state; agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); agg->dtag_ecb = ecb; ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); switch (desc->dtad_kind) { case DTRACEAGG_MIN: agg->dtag_initial = INT64_MAX; agg->dtag_aggregate = dtrace_aggregate_min; break; case DTRACEAGG_MAX: agg->dtag_initial = INT64_MIN; agg->dtag_aggregate = dtrace_aggregate_max; break; case DTRACEAGG_COUNT: agg->dtag_aggregate = dtrace_aggregate_count; break; case DTRACEAGG_QUANTIZE: agg->dtag_aggregate = dtrace_aggregate_quantize; size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * sizeof (uint64_t); break; case DTRACEAGG_LQUANTIZE: { uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); agg->dtag_initial = desc->dtad_arg; agg->dtag_aggregate = dtrace_aggregate_lquantize; if (step == 0 || levels == 0) goto err; size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); break; } case DTRACEAGG_LLQUANTIZE: { uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); int64_t v; agg->dtag_initial = desc->dtad_arg; agg->dtag_aggregate = dtrace_aggregate_llquantize; if (factor < 2 || low >= high || nsteps < factor) goto err; /* * Now check that the number of steps evenly divides a power * of the factor. (This assures both integer bucket size and * linearity within each magnitude.) */ for (v = factor; v < nsteps; v *= factor) continue; if ((v % nsteps) || (nsteps % factor)) goto err; size = (dtrace_aggregate_llquantize_bucket(factor, low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); break; } case DTRACEAGG_AVG: agg->dtag_aggregate = dtrace_aggregate_avg; size = sizeof (uint64_t) * 2; break; case DTRACEAGG_STDDEV: agg->dtag_aggregate = dtrace_aggregate_stddev; size = sizeof (uint64_t) * 4; break; case DTRACEAGG_SUM: agg->dtag_aggregate = dtrace_aggregate_sum; break; default: goto err; } agg->dtag_action.dta_rec.dtrd_size = size; if (ntuple == 0) goto err; /* * We must make sure that we have enough actions for the n-tuple. */ for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { if (DTRACEACT_ISAGG(act->dta_kind)) break; if (--ntuple == 0) { /* * This is the action with which our n-tuple begins. */ agg->dtag_first = act; goto success; } } /* * This n-tuple is short by ntuple elements. Return failure. */ ASSERT(ntuple != 0); err: kmem_free(agg, sizeof (dtrace_aggregation_t)); return (NULL); success: /* * If the last action in the tuple has a size of zero, it's actually * an expression argument for the aggregating action. */ ASSERT(ecb->dte_action_last != NULL); act = ecb->dte_action_last; if (act->dta_kind == DTRACEACT_DIFEXPR) { ASSERT(act->dta_difo != NULL); if (act->dta_difo->dtdo_rtype.dtdt_size == 0) agg->dtag_hasarg = 1; } /* * We need to allocate an id for this aggregation. 
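 * On illumos the ID comes from a vmem arena; on FreeBSD it comes from a
 * unit-number allocator (alloc_unr() on dts_aggid_arena, an arena
 * presumably created elsewhere with new_unrhdr() and returned to via
 * free_unr() in dtrace_ecb_aggregation_destroy()). Either way the code
 * below relies on IDs being handed out densely starting at 1 -- note the
 * ASSERT -- which is why dts_aggregations is indexed with (aggid - 1).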
*/ #ifdef illumos aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, VM_BESTFIT | VM_SLEEP); #else aggid = alloc_unr(state->dts_aggid_arena); #endif if (aggid - 1 >= state->dts_naggregations) { dtrace_aggregation_t **oaggs = state->dts_aggregations; dtrace_aggregation_t **aggs; int naggs = state->dts_naggregations << 1; int onaggs = state->dts_naggregations; ASSERT(aggid == state->dts_naggregations + 1); if (naggs == 0) { ASSERT(oaggs == NULL); naggs = 1; } aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); if (oaggs != NULL) { bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); kmem_free(oaggs, onaggs * sizeof (*aggs)); } state->dts_aggregations = aggs; state->dts_naggregations = naggs; } ASSERT(state->dts_aggregations[aggid - 1] == NULL); state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; frec = &agg->dtag_first->dta_rec; if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) frec->dtrd_alignment = sizeof (dtrace_aggid_t); for (act = agg->dtag_first; act != NULL; act = act->dta_next) { ASSERT(!act->dta_intuple); act->dta_intuple = 1; } return (&agg->dtag_action); } static void dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) { dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; dtrace_state_t *state = ecb->dte_state; dtrace_aggid_t aggid = agg->dtag_id; ASSERT(DTRACEACT_ISAGG(act->dta_kind)); #ifdef illumos vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); #else free_unr(state->dts_aggid_arena, aggid); #endif ASSERT(state->dts_aggregations[aggid - 1] == agg); state->dts_aggregations[aggid - 1] = NULL; kmem_free(agg, sizeof (dtrace_aggregation_t)); } static int dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) { dtrace_action_t *action, *last; dtrace_difo_t *dp = desc->dtad_difo; uint32_t size = 0, align = sizeof (uint8_t), mask; uint16_t format = 0; dtrace_recdesc_t *rec; dtrace_state_t *state = ecb->dte_state; dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; uint64_t arg = desc->dtad_arg; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); if (DTRACEACT_ISAGG(desc->dtad_kind)) { /* * If this is an aggregating action, there must be neither * a speculate nor a commit on the action chain. */ dtrace_action_t *act; for (act = ecb->dte_action; act != NULL; act = act->dta_next) { if (act->dta_kind == DTRACEACT_COMMIT) return (EINVAL); if (act->dta_kind == DTRACEACT_SPECULATE) return (EINVAL); } action = dtrace_ecb_aggregation_create(ecb, desc); if (action == NULL) return (EINVAL); } else { if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || (desc->dtad_kind == DTRACEACT_DIFEXPR && dp != NULL && dp->dtdo_destructive)) { state->dts_destructive = 1; } switch (desc->dtad_kind) { case DTRACEACT_PRINTF: case DTRACEACT_PRINTA: case DTRACEACT_SYSTEM: case DTRACEACT_FREOPEN: case DTRACEACT_DIFEXPR: /* * We know that our arg is a string -- turn it into a * format. 
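 * The handle that dtrace_format_add() returns lands in the record's
 * dtrd_format below; 0 means "no format", which is what printa() and plain
 * DIF expressions get when no format string was supplied.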
*/ if (arg == 0) { ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || desc->dtad_kind == DTRACEACT_DIFEXPR); format = 0; } else { ASSERT(arg != 0); #ifdef illumos ASSERT(arg > KERNELBASE); #endif format = dtrace_format_add(state, (char *)(uintptr_t)arg); } /*FALLTHROUGH*/ case DTRACEACT_LIBACT: case DTRACEACT_TRACEMEM: case DTRACEACT_TRACEMEM_DYNSIZE: if (dp == NULL) return (EINVAL); if ((size = dp->dtdo_rtype.dtdt_size) != 0) break; if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) return (EINVAL); size = opt[DTRACEOPT_STRSIZE]; } break; case DTRACEACT_STACK: if ((nframes = arg) == 0) { nframes = opt[DTRACEOPT_STACKFRAMES]; ASSERT(nframes > 0); arg = nframes; } size = nframes * sizeof (pc_t); break; case DTRACEACT_JSTACK: if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) nframes = opt[DTRACEOPT_JSTACKFRAMES]; arg = DTRACE_USTACK_ARG(nframes, strsize); /*FALLTHROUGH*/ case DTRACEACT_USTACK: if (desc->dtad_kind != DTRACEACT_JSTACK && (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { strsize = DTRACE_USTACK_STRSIZE(arg); nframes = opt[DTRACEOPT_USTACKFRAMES]; ASSERT(nframes > 0); arg = DTRACE_USTACK_ARG(nframes, strsize); } /* * Save a slot for the pid. */ size = (nframes + 1) * sizeof (uint64_t); size += DTRACE_USTACK_STRSIZE(arg); size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); break; case DTRACEACT_SYM: case DTRACEACT_MOD: if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != sizeof (uint64_t)) || (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) return (EINVAL); break; case DTRACEACT_USYM: case DTRACEACT_UMOD: case DTRACEACT_UADDR: if (dp == NULL || (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) return (EINVAL); /* * We have a slot for the pid, plus a slot for the * argument. To keep things simple (aligned with * bitness-neutral sizing), we store each as a 64-bit * quantity. */ size = 2 * sizeof (uint64_t); break; case DTRACEACT_STOP: case DTRACEACT_BREAKPOINT: case DTRACEACT_PANIC: break; case DTRACEACT_CHILL: case DTRACEACT_DISCARD: case DTRACEACT_RAISE: if (dp == NULL) return (EINVAL); break; case DTRACEACT_EXIT: if (dp == NULL || (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) return (EINVAL); break; case DTRACEACT_SPECULATE: if (ecb->dte_size > sizeof (dtrace_rechdr_t)) return (EINVAL); if (dp == NULL) return (EINVAL); state->dts_speculates = 1; break; case DTRACEACT_PRINTM: size = dp->dtdo_rtype.dtdt_size; break; case DTRACEACT_COMMIT: { dtrace_action_t *act = ecb->dte_action; for (; act != NULL; act = act->dta_next) { if (act->dta_kind == DTRACEACT_COMMIT) return (EINVAL); } if (dp == NULL) return (EINVAL); break; } default: return (EINVAL); } if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { /* * If this is a data-storing action or a speculate, * we must be sure that there isn't a commit on the * action chain. 
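 * (This mirrors the DTRACEACT_COMMIT case above, which rejects a second
 * commit: once a commit is present in the chain, no data-storing or
 * speculative action may be appended after it, so the two checks enforce
 * the ordering from both directions.)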
*/ dtrace_action_t *act = ecb->dte_action; for (; act != NULL; act = act->dta_next) { if (act->dta_kind == DTRACEACT_COMMIT) return (EINVAL); } } action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); action->dta_rec.dtrd_size = size; } action->dta_refcnt = 1; rec = &action->dta_rec; size = rec->dtrd_size; for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { if (!(size & mask)) { align = mask + 1; break; } } action->dta_kind = desc->dtad_kind; if ((action->dta_difo = dp) != NULL) dtrace_difo_hold(dp); rec->dtrd_action = action->dta_kind; rec->dtrd_arg = arg; rec->dtrd_uarg = desc->dtad_uarg; rec->dtrd_alignment = (uint16_t)align; rec->dtrd_format = format; if ((last = ecb->dte_action_last) != NULL) { ASSERT(ecb->dte_action != NULL); action->dta_prev = last; last->dta_next = action; } else { ASSERT(ecb->dte_action == NULL); ecb->dte_action = action; } ecb->dte_action_last = action; return (0); } static void dtrace_ecb_action_remove(dtrace_ecb_t *ecb) { dtrace_action_t *act = ecb->dte_action, *next; dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; dtrace_difo_t *dp; uint16_t format; if (act != NULL && act->dta_refcnt > 1) { ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); act->dta_refcnt--; } else { for (; act != NULL; act = next) { next = act->dta_next; ASSERT(next != NULL || act == ecb->dte_action_last); ASSERT(act->dta_refcnt == 1); if ((format = act->dta_rec.dtrd_format) != 0) dtrace_format_remove(ecb->dte_state, format); if ((dp = act->dta_difo) != NULL) dtrace_difo_release(dp, vstate); if (DTRACEACT_ISAGG(act->dta_kind)) { dtrace_ecb_aggregation_destroy(ecb, act); } else { kmem_free(act, sizeof (dtrace_action_t)); } } } ecb->dte_action = NULL; ecb->dte_action_last = NULL; ecb->dte_size = 0; } static void dtrace_ecb_disable(dtrace_ecb_t *ecb) { /* * We disable the ECB by removing it from its probe. */ dtrace_ecb_t *pecb, *prev = NULL; dtrace_probe_t *probe = ecb->dte_probe; ASSERT(MUTEX_HELD(&dtrace_lock)); if (probe == NULL) { /* * This is the NULL probe; there is nothing to disable. */ return; } for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { if (pecb == ecb) break; prev = pecb; } ASSERT(pecb != NULL); if (prev == NULL) { probe->dtpr_ecb = ecb->dte_next; } else { prev->dte_next = ecb->dte_next; } if (ecb == probe->dtpr_ecb_last) { ASSERT(ecb->dte_next == NULL); probe->dtpr_ecb_last = prev; } /* * The ECB has been disconnected from the probe; now sync to assure * that all CPUs have seen the change before returning. */ dtrace_sync(); if (probe->dtpr_ecb == NULL) { /* * That was the last ECB on the probe; clear the predicate * cache ID for the probe, disable it and sync one more time * to assure that we'll never hit it again. */ dtrace_provider_t *prov = probe->dtpr_provider; ASSERT(ecb->dte_next == NULL); ASSERT(probe->dtpr_ecb_last == NULL); probe->dtpr_predcache = DTRACE_CACHEIDNONE; prov->dtpv_pops.dtps_disable(prov->dtpv_arg, probe->dtpr_id, probe->dtpr_arg); dtrace_sync(); } else { /* * There is at least one ECB remaining on the probe. If there * is _exactly_ one, set the probe's predicate cache ID to be * the predicate cache ID of the remaining ECB. 
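 * (dtrace_ecb_enable() zeroed dtpr_predcache when a second ECB was chained
 * onto the probe, since a single cached predicate no longer described it;
 * with one ECB left, restoring its dtp_cacheid lets dtrace_probe() again
 * short-circuit evaluation for threads whose cached result is still
 * valid.)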
*/ ASSERT(probe->dtpr_ecb_last != NULL); ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); if (probe->dtpr_ecb == probe->dtpr_ecb_last) { dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; ASSERT(probe->dtpr_ecb->dte_next == NULL); if (p != NULL) probe->dtpr_predcache = p->dtp_cacheid; } ecb->dte_next = NULL; } } static void dtrace_ecb_destroy(dtrace_ecb_t *ecb) { dtrace_state_t *state = ecb->dte_state; dtrace_vstate_t *vstate = &state->dts_vstate; dtrace_predicate_t *pred; dtrace_epid_t epid = ecb->dte_epid; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(ecb->dte_next == NULL); ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); if ((pred = ecb->dte_predicate) != NULL) dtrace_predicate_release(pred, vstate); dtrace_ecb_action_remove(ecb); ASSERT(state->dts_ecbs[epid - 1] == ecb); state->dts_ecbs[epid - 1] = NULL; kmem_free(ecb, sizeof (dtrace_ecb_t)); } static dtrace_ecb_t * dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, dtrace_enabling_t *enab) { dtrace_ecb_t *ecb; dtrace_predicate_t *pred; dtrace_actdesc_t *act; dtrace_provider_t *prov; dtrace_ecbdesc_t *desc = enab->dten_current; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(state != NULL); ecb = dtrace_ecb_add(state, probe); ecb->dte_uarg = desc->dted_uarg; if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { dtrace_predicate_hold(pred); ecb->dte_predicate = pred; } if (probe != NULL) { /* * If the provider shows more leg than the consumer is old * enough to see, we need to enable the appropriate implicit * predicate bits to prevent the ecb from activating at * revealing times. * * Providers specifying DTRACE_PRIV_USER at register time * are stating that they need the /proc-style privilege * model to be enforced, and this is what DTRACE_COND_OWNER * and DTRACE_COND_ZONEOWNER will then do at probe time. */ prov = probe->dtpr_provider; if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) ecb->dte_cond |= DTRACE_COND_OWNER; if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) ecb->dte_cond |= DTRACE_COND_ZONEOWNER; /* * If the provider shows us kernel innards and the user * is lacking sufficient privilege, enable the * DTRACE_COND_USERMODE implicit predicate. */ if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) ecb->dte_cond |= DTRACE_COND_USERMODE; } if (dtrace_ecb_create_cache != NULL) { /* * If we have a cached ecb, we'll use its action list instead * of creating our own (saving both time and space). 
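 * The bump of dta_refcnt below is what makes the sharing safe:
 * dtrace_ecb_action_remove() only walks and frees an action list once its
 * refcount has fallen to 1, so every ECB sharing the cached list must be
 * destroyed before the actions themselves are.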
*/ dtrace_ecb_t *cached = dtrace_ecb_create_cache; dtrace_action_t *act = cached->dte_action; if (act != NULL) { ASSERT(act->dta_refcnt > 0); act->dta_refcnt++; ecb->dte_action = act; ecb->dte_action_last = cached->dte_action_last; ecb->dte_needed = cached->dte_needed; ecb->dte_size = cached->dte_size; ecb->dte_alignment = cached->dte_alignment; } return (ecb); } for (act = desc->dted_action; act != NULL; act = act->dtad_next) { if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { dtrace_ecb_destroy(ecb); return (NULL); } } if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) { dtrace_ecb_destroy(ecb); return (NULL); } return (dtrace_ecb_create_cache = ecb); } static int dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) { dtrace_ecb_t *ecb; dtrace_enabling_t *enab = arg; dtrace_state_t *state = enab->dten_vstate->dtvs_state; ASSERT(state != NULL); if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { /* * This probe was created in a generation for which this * enabling has previously created ECBs; we don't want to * enable it again, so just kick out. */ return (DTRACE_MATCH_NEXT); } if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) return (DTRACE_MATCH_DONE); dtrace_ecb_enable(ecb); return (DTRACE_MATCH_NEXT); } static dtrace_ecb_t * dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) { dtrace_ecb_t *ecb; ASSERT(MUTEX_HELD(&dtrace_lock)); if (id == 0 || id > state->dts_necbs) return (NULL); ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); return (state->dts_ecbs[id - 1]); } static dtrace_aggregation_t * dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) { dtrace_aggregation_t *agg; ASSERT(MUTEX_HELD(&dtrace_lock)); if (id == 0 || id > state->dts_naggregations) return (NULL); ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || agg->dtag_id == id); return (state->dts_aggregations[id - 1]); } /* * DTrace Buffer Functions * * The following functions manipulate DTrace buffers. Most of these functions * are called in the context of establishing or processing consumer state; * exceptions are explicitly noted. */ /* * Note: called from cross call context. This function switches the two * buffers on a given CPU. The atomicity of this operation is assured by * disabling interrupts while the actual switch takes place; the disabling of * interrupts serializes the execution with any execution of dtrace_probe() on * the same CPU. */ static void dtrace_buffer_switch(dtrace_buffer_t *buf) { caddr_t tomax = buf->dtb_tomax; caddr_t xamot = buf->dtb_xamot; dtrace_icookie_t cookie; hrtime_t now; ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); cookie = dtrace_interrupt_disable(); now = dtrace_gethrtime(); buf->dtb_tomax = xamot; buf->dtb_xamot = tomax; buf->dtb_xamot_drops = buf->dtb_drops; buf->dtb_xamot_offset = buf->dtb_offset; buf->dtb_xamot_errors = buf->dtb_errors; buf->dtb_xamot_flags = buf->dtb_flags; buf->dtb_offset = 0; buf->dtb_drops = 0; buf->dtb_errors = 0; buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); buf->dtb_interval = now - buf->dtb_switched; buf->dtb_switched = now; dtrace_interrupt_enable(cookie); } /* * Note: called from cross call context. This function activates a buffer * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation * is guaranteed by the disabling of interrupts. 
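 * (Both cross-called routines are reached via dtrace_xcall(), so they
 * execute on the CPU that owns the buffer; disabling interrupts there then
 * suffices to serialize against dtrace_probe() on that same CPU without
 * taking any locks in probe context.)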
*/ static void dtrace_buffer_activate(dtrace_state_t *state) { dtrace_buffer_t *buf; dtrace_icookie_t cookie = dtrace_interrupt_disable(); buf = &state->dts_buffer[curcpu]; if (buf->dtb_tomax != NULL) { /* * We might like to assert that the buffer is marked inactive, * but this isn't necessarily true: the buffer for the CPU * that processes the BEGIN probe has its buffer activated * manually. In this case, we take the (harmless) action of * re-clearing the INACTIVE bit. */ buf->dtb_flags &= ~DTRACEBUF_INACTIVE; } dtrace_interrupt_enable(cookie); } #ifdef __FreeBSD__ /* * Activate the specified per-CPU buffer. This is used instead of * dtrace_buffer_activate() when APs have not yet started, i.e. when * activating anonymous state. */ static void dtrace_buffer_activate_cpu(dtrace_state_t *state, int cpu) { if (state->dts_buffer[cpu].dtb_tomax != NULL) state->dts_buffer[cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; } #endif static int dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, processorid_t cpu, int *factor) { #ifdef illumos cpu_t *cp; #endif dtrace_buffer_t *buf; int allocated = 0, desired = 0; #ifdef illumos ASSERT(MUTEX_HELD(&cpu_lock)); ASSERT(MUTEX_HELD(&dtrace_lock)); *factor = 1; if (size > dtrace_nonroot_maxsize && !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) return (EFBIG); cp = cpu_list; do { if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) continue; buf = &bufs[cp->cpu_id]; /* * If there is already a buffer allocated for this CPU, it * is only possible that this is a DR event. In this case, * the buffer size must match our specified size. */ if (buf->dtb_tomax != NULL) { ASSERT(buf->dtb_size == size); continue; } ASSERT(buf->dtb_xamot == NULL); if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) goto err; buf->dtb_size = size; buf->dtb_flags = flags; buf->dtb_offset = 0; buf->dtb_drops = 0; if (flags & DTRACEBUF_NOSWITCH) continue; if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) goto err; } while ((cp = cp->cpu_next) != cpu_list); return (0); err: cp = cpu_list; do { if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) continue; buf = &bufs[cp->cpu_id]; desired += 2; if (buf->dtb_xamot != NULL) { ASSERT(buf->dtb_tomax != NULL); ASSERT(buf->dtb_size == size); kmem_free(buf->dtb_xamot, size); allocated++; } if (buf->dtb_tomax != NULL) { ASSERT(buf->dtb_size == size); kmem_free(buf->dtb_tomax, size); allocated++; } buf->dtb_tomax = NULL; buf->dtb_xamot = NULL; buf->dtb_size = 0; } while ((cp = cp->cpu_next) != cpu_list); #else int i; *factor = 1; #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ defined(__mips__) || defined(__powerpc__) || defined(__riscv__) /* * FreeBSD isn't good at limiting the amount of memory we * ask to malloc, so let's place a limit here before trying * to do something that might well end in tears at bedtime. */ if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) return (ENOMEM); #endif ASSERT(MUTEX_HELD(&dtrace_lock)); CPU_FOREACH(i) { if (cpu != DTRACE_CPUALL && cpu != i) continue; buf = &bufs[i]; /* * If there is already a buffer allocated for this CPU, it * is only possible that this is a DR event. In this case, * the buffer size must match our specified size.
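 * (Two asides on sizing: on the platforms listed above, a request is
 * rejected outright once it exceeds physmem * PAGE_SIZE / (128 *
 * (mp_maxid + 1)) -- on a hypothetical 8GB, 4-CPU machine, 16MB per CPU --
 * and should a kmem_zalloc() fail partway through, the error path reports
 * desired/allocated through *factor so that the caller can scale the
 * request down before retrying.)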
*/ if (buf->dtb_tomax != NULL) { ASSERT(buf->dtb_size == size); continue; } ASSERT(buf->dtb_xamot == NULL); if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) goto err; buf->dtb_size = size; buf->dtb_flags = flags; buf->dtb_offset = 0; buf->dtb_drops = 0; if (flags & DTRACEBUF_NOSWITCH) continue; if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) goto err; } return (0); err: /* * Error allocating memory, so free the buffers that were * allocated before the failed allocation. */ CPU_FOREACH(i) { if (cpu != DTRACE_CPUALL && cpu != i) continue; buf = &bufs[i]; desired += 2; if (buf->dtb_xamot != NULL) { ASSERT(buf->dtb_tomax != NULL); ASSERT(buf->dtb_size == size); kmem_free(buf->dtb_xamot, size); allocated++; } if (buf->dtb_tomax != NULL) { ASSERT(buf->dtb_size == size); kmem_free(buf->dtb_tomax, size); allocated++; } buf->dtb_tomax = NULL; buf->dtb_xamot = NULL; buf->dtb_size = 0; } #endif *factor = desired / (allocated > 0 ? allocated : 1); return (ENOMEM); } /* * Note: called from probe context. This function just increments the drop * count on a buffer. It has been made a function to allow for the * possibility of understanding the source of mysterious drop counts. (A * problem for which one may be particularly disappointed that DTrace cannot * be used to understand DTrace.) */ static void dtrace_buffer_drop(dtrace_buffer_t *buf) { buf->dtb_drops++; } /* * Note: called from probe context. This function is called to reserve space * in a buffer. If mstate is non-NULL, sets the scratch base and size in the * mstate. Returns the new offset in the buffer, or a negative value if an * error has occurred. */ static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, dtrace_state_t *state, dtrace_mstate_t *mstate) { intptr_t offs = buf->dtb_offset, soffs; intptr_t woffs; caddr_t tomax; size_t total; if (buf->dtb_flags & DTRACEBUF_INACTIVE) return (-1); if ((tomax = buf->dtb_tomax) == NULL) { dtrace_buffer_drop(buf); return (-1); } if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { while (offs & (align - 1)) { /* * Assert that our alignment is off by a number which * is itself sizeof (uint32_t) aligned. */ ASSERT(!((align - (offs & (align - 1))) & (sizeof (uint32_t) - 1))); DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); offs += sizeof (uint32_t); } if ((soffs = offs + needed) > buf->dtb_size) { dtrace_buffer_drop(buf); return (-1); } if (mstate == NULL) return (offs); mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; mstate->dtms_scratch_size = buf->dtb_size - soffs; mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; return (offs); } if (buf->dtb_flags & DTRACEBUF_FILL) { if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && (buf->dtb_flags & DTRACEBUF_FULL)) return (-1); goto out; } total = needed + (offs & (align - 1)); /* * For a ring buffer, life is quite a bit more complicated. Before * we can store any padding, we need to adjust our wrapping offset. * (If we've never before wrapped or we're not about to, no adjustment * is required.) */ if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || offs + total > buf->dtb_size) { woffs = buf->dtb_xamot_offset; if (offs + total > buf->dtb_size) { /* * We can't fit in the end of the buffer. First, a * sanity check that we can fit in the buffer at all. */ if (total > buf->dtb_size) { dtrace_buffer_drop(buf); return (-1); } /* * We're going to be storing at the top of the buffer, * so now we need to deal with the wrapped offset. 
We * only reset our wrapped offset to 0 if it is * currently greater than the current offset. If it * is less than the current offset, it is because a * previous allocation induced a wrap -- but the * allocation didn't subsequently take the space due * to an error or false predicate evaluation. In this * case, we'll just leave the wrapped offset alone: if * the wrapped offset hasn't been advanced far enough * for this allocation, it will be adjusted in the * lower loop. */ if (buf->dtb_flags & DTRACEBUF_WRAPPED) { if (woffs >= offs) woffs = 0; } else { woffs = 0; } /* * Now we know that we're going to be storing to the * top of the buffer and that there is room for us * there. We need to clear the buffer from the current * offset to the end (there may be old gunk there). */ while (offs < buf->dtb_size) tomax[offs++] = 0; /* * We need to set our offset to zero. And because we * are wrapping, we need to set the bit indicating as * much. We can also adjust our needed space back * down to the space required by the ECB -- we know * that the top of the buffer is aligned. */ offs = 0; total = needed; buf->dtb_flags |= DTRACEBUF_WRAPPED; } else { /* * There is room for us in the buffer, so we simply * need to check the wrapped offset. */ if (woffs < offs) { /* * The wrapped offset is less than the offset. * This can happen if we allocated buffer space * that induced a wrap, but then we didn't * subsequently take the space due to an error * or false predicate evaluation. This is * okay; we know that _this_ allocation isn't * going to induce a wrap. We still can't * reset the wrapped offset to be zero, * however: the space may have been trashed in * the previous failed probe attempt. But at * least the wrapped offset doesn't need to * be adjusted at all... */ goto out; } } while (offs + total > woffs) { dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); size_t size; if (epid == DTRACE_EPIDNONE) { size = sizeof (uint32_t); } else { ASSERT3U(epid, <=, state->dts_necbs); ASSERT(state->dts_ecbs[epid - 1] != NULL); size = state->dts_ecbs[epid - 1]->dte_size; } ASSERT(woffs + size <= buf->dtb_size); ASSERT(size != 0); if (woffs + size == buf->dtb_size) { /* * We've reached the end of the buffer; we want * to set the wrapped offset to 0 and break * out. However, if the offs is 0, then we're * in a strange edge-condition: the amount of * space that we want to reserve plus the size * of the record that we're overwriting is * greater than the size of the buffer. This * is problematic because if we reserve the * space but subsequently don't consume it (due * to a failed predicate or error) the wrapped * offset will be 0 -- yet the EPID at offset 0 * will not be committed. This situation is * relatively easy to deal with: if we're in * this case, the buffer is indistinguishable * from one that hasn't wrapped; we need only * finish the job by clearing the wrapped bit, * explicitly setting the offset to be 0, and * zero'ing out the old data in the buffer. */ if (offs == 0) { buf->dtb_flags &= ~DTRACEBUF_WRAPPED; buf->dtb_offset = 0; woffs = total; while (woffs < buf->dtb_size) tomax[woffs++] = 0; } woffs = 0; break; } woffs += size; } /* * We have a wrapped offset. It may be that the wrapped offset * has become zero -- that's okay. */ buf->dtb_xamot_offset = woffs; } out: /* * Now we can plow the buffer with any necessary padding. */ while (offs & (align - 1)) { /* * Assert that our alignment is off by a number which * is itself sizeof (uint32_t) aligned. 
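 * For instance, with offs == 12 and align == 8 the shortfall is 4: a
 * single DTRACE_EPIDNONE word is stored at offset 12 and offs advances to
 * 16. Consumers (and the reclaim loop above) recognize DTRACE_EPIDNONE as
 * padding and skip it a word at a time, which is why the shortfall must
 * be a multiple of sizeof (uint32_t).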
*/ ASSERT(!((align - (offs & (align - 1))) & (sizeof (uint32_t) - 1))); DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); offs += sizeof (uint32_t); } if (buf->dtb_flags & DTRACEBUF_FILL) { if (offs + needed > buf->dtb_size - state->dts_reserve) { buf->dtb_flags |= DTRACEBUF_FULL; return (-1); } } if (mstate == NULL) return (offs); /* * For ring buffers and fill buffers, the scratch space is always * the inactive buffer. */ mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; mstate->dtms_scratch_size = buf->dtb_size; mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; return (offs); } static void dtrace_buffer_polish(dtrace_buffer_t *buf) { ASSERT(buf->dtb_flags & DTRACEBUF_RING); ASSERT(MUTEX_HELD(&dtrace_lock)); if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) return; /* * We need to polish the ring buffer. There are three cases: * * - The first (and presumably most common) is that there is no gap * between the buffer offset and the wrapped offset. In this case, * there is nothing in the buffer that isn't valid data; we can * mark the buffer as polished and return. * * - The second (less common than the first but still more common * than the third) is that there is a gap between the buffer offset * and the wrapped offset, and the wrapped offset is larger than the * buffer offset. This can happen because of an alignment issue, or * can happen because of a call to dtrace_buffer_reserve() that * didn't subsequently consume the buffer space. In this case, * we need to zero the data from the buffer offset to the wrapped * offset. * * - The third (and least common) is that there is a gap between the * buffer offset and the wrapped offset, but the wrapped offset is * _less_ than the buffer offset. This can only happen because a * call to dtrace_buffer_reserve() induced a wrap, but the space * was not subsequently consumed. In this case, we need to zero the * space from the offset to the end of the buffer _and_ from the * top of the buffer to the wrapped offset. */ if (buf->dtb_offset < buf->dtb_xamot_offset) { bzero(buf->dtb_tomax + buf->dtb_offset, buf->dtb_xamot_offset - buf->dtb_offset); } if (buf->dtb_offset > buf->dtb_xamot_offset) { bzero(buf->dtb_tomax + buf->dtb_offset, buf->dtb_size - buf->dtb_offset); bzero(buf->dtb_tomax, buf->dtb_xamot_offset); } } /* * This routine determines if data generated at the specified time has likely * been entirely consumed at user-level. This routine is called to determine * if an ECB on a defunct probe (but for an active enabling) can be safely * disabled and destroyed. 
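 * The test below is conservative: dtrace_buffer_switch() records the
 * switch time in dtb_switched and the gap to the previous switch in
 * dtb_interval, so (dtb_switched - dtb_interval) approximates the time of
 * the next-to-last switch. Only if that, too, is at or after `when' --
 * that is, the buffer has been switched (and therefore consumed) twice
 * since the data was generated -- do we report the data consumed; ring
 * buffers are never reported consumed at all.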
*/ static int dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) { int i; for (i = 0; i < NCPU; i++) { dtrace_buffer_t *buf = &bufs[i]; if (buf->dtb_size == 0) continue; if (buf->dtb_flags & DTRACEBUF_RING) return (0); if (!buf->dtb_switched && buf->dtb_offset != 0) return (0); if (buf->dtb_switched - buf->dtb_interval < when) return (0); } return (1); } static void dtrace_buffer_free(dtrace_buffer_t *bufs) { int i; for (i = 0; i < NCPU; i++) { dtrace_buffer_t *buf = &bufs[i]; if (buf->dtb_tomax == NULL) { ASSERT(buf->dtb_xamot == NULL); ASSERT(buf->dtb_size == 0); continue; } if (buf->dtb_xamot != NULL) { ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); kmem_free(buf->dtb_xamot, buf->dtb_size); } kmem_free(buf->dtb_tomax, buf->dtb_size); buf->dtb_size = 0; buf->dtb_tomax = NULL; buf->dtb_xamot = NULL; } } /* * DTrace Enabling Functions */ static dtrace_enabling_t * dtrace_enabling_create(dtrace_vstate_t *vstate) { dtrace_enabling_t *enab; enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); enab->dten_vstate = vstate; return (enab); } static void dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) { dtrace_ecbdesc_t **ndesc; size_t osize, nsize; /* * We can't add to enablings after we've enabled them, or after we've * retained them. */ ASSERT(enab->dten_probegen == 0); ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); if (enab->dten_ndesc < enab->dten_maxdesc) { enab->dten_desc[enab->dten_ndesc++] = ecb; return; } osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); if (enab->dten_maxdesc == 0) { enab->dten_maxdesc = 1; } else { enab->dten_maxdesc <<= 1; } ASSERT(enab->dten_ndesc < enab->dten_maxdesc); nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); ndesc = kmem_zalloc(nsize, KM_SLEEP); bcopy(enab->dten_desc, ndesc, osize); if (enab->dten_desc != NULL) kmem_free(enab->dten_desc, osize); enab->dten_desc = ndesc; enab->dten_desc[enab->dten_ndesc++] = ecb; } static void dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, dtrace_probedesc_t *pd) { dtrace_ecbdesc_t *new; dtrace_predicate_t *pred; dtrace_actdesc_t *act; /* * We're going to create a new ECB description that matches the * specified ECB in every way, but has the specified probe description. 
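 * Note that "matches" here means shares: the predicate and the action
 * descriptions are not copied, merely held (via dtrace_predicate_hold()
 * and dtrace_actdesc_hold() below), so that dtrace_enabling_destroy() can
 * later release every description uniformly, original or replicated.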
*/ new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) dtrace_predicate_hold(pred); for (act = ecb->dted_action; act != NULL; act = act->dtad_next) dtrace_actdesc_hold(act); new->dted_action = ecb->dted_action; new->dted_pred = ecb->dted_pred; new->dted_probe = *pd; new->dted_uarg = ecb->dted_uarg; dtrace_enabling_add(enab, new); } static void dtrace_enabling_dump(dtrace_enabling_t *enab) { int i; for (i = 0; i < enab->dten_ndesc; i++) { dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; #ifdef __FreeBSD__ printf("dtrace: enabling probe %d (%s:%s:%s:%s)\n", i, desc->dtpd_provider, desc->dtpd_mod, desc->dtpd_func, desc->dtpd_name); #else cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, desc->dtpd_provider, desc->dtpd_mod, desc->dtpd_func, desc->dtpd_name); #endif } } static void dtrace_enabling_destroy(dtrace_enabling_t *enab) { int i; dtrace_ecbdesc_t *ep; dtrace_vstate_t *vstate = enab->dten_vstate; ASSERT(MUTEX_HELD(&dtrace_lock)); for (i = 0; i < enab->dten_ndesc; i++) { dtrace_actdesc_t *act, *next; dtrace_predicate_t *pred; ep = enab->dten_desc[i]; if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) dtrace_predicate_release(pred, vstate); for (act = ep->dted_action; act != NULL; act = next) { next = act->dtad_next; dtrace_actdesc_release(act, vstate); } kmem_free(ep, sizeof (dtrace_ecbdesc_t)); } if (enab->dten_desc != NULL) kmem_free(enab->dten_desc, enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); /* * If this was a retained enabling, decrement the dts_nretained count * and take it off of the dtrace_retained list. */ if (enab->dten_prev != NULL || enab->dten_next != NULL || dtrace_retained == enab) { ASSERT(enab->dten_vstate->dtvs_state != NULL); ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); enab->dten_vstate->dtvs_state->dts_nretained--; dtrace_retained_gen++; } if (enab->dten_prev == NULL) { if (dtrace_retained == enab) { dtrace_retained = enab->dten_next; if (dtrace_retained != NULL) dtrace_retained->dten_prev = NULL; } } else { ASSERT(enab != dtrace_retained); ASSERT(dtrace_retained != NULL); enab->dten_prev->dten_next = enab->dten_next; } if (enab->dten_next != NULL) { ASSERT(dtrace_retained != NULL); enab->dten_next->dten_prev = enab->dten_prev; } kmem_free(enab, sizeof (dtrace_enabling_t)); } static int dtrace_enabling_retain(dtrace_enabling_t *enab) { dtrace_state_t *state; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); ASSERT(enab->dten_vstate != NULL); state = enab->dten_vstate->dtvs_state; ASSERT(state != NULL); /* * We only allow each state to retain dtrace_retain_max enablings. */ if (state->dts_nretained >= dtrace_retain_max) return (ENOSPC); state->dts_nretained++; dtrace_retained_gen++; if (dtrace_retained == NULL) { dtrace_retained = enab; return (0); } enab->dten_next = dtrace_retained; dtrace_retained->dten_prev = enab; dtrace_retained = enab; return (0); } static int dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, dtrace_probedesc_t *create) { dtrace_enabling_t *new, *enab; int found = 0, err = ENOENT; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); new = dtrace_enabling_create(&state->dts_vstate); /* * Iterate over all retained enablings, looking for enablings that * match the specified state. 
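 * (This presumably backs the DTRACEIOC_REPLICATE ioctl: the caller
 * supplies a "match" description and a "create" description, and every
 * retained ECB description matching the former is cloned under the
 * latter.)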
*/ for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { int i; /* * dtvs_state can only be NULL for helper enablings -- and * helper enablings can't be retained. */ ASSERT(enab->dten_vstate->dtvs_state != NULL); if (enab->dten_vstate->dtvs_state != state) continue; /* * Now iterate over each probe description; we're looking for * an exact match to the specified probe description. */ for (i = 0; i < enab->dten_ndesc; i++) { dtrace_ecbdesc_t *ep = enab->dten_desc[i]; dtrace_probedesc_t *pd = &ep->dted_probe; if (strcmp(pd->dtpd_provider, match->dtpd_provider)) continue; if (strcmp(pd->dtpd_mod, match->dtpd_mod)) continue; if (strcmp(pd->dtpd_func, match->dtpd_func)) continue; if (strcmp(pd->dtpd_name, match->dtpd_name)) continue; /* * We have a winning probe! Add it to our growing * enabling. */ found = 1; dtrace_enabling_addlike(new, ep, create); } } if (!found || (err = dtrace_enabling_retain(new)) != 0) { dtrace_enabling_destroy(new); return (err); } return (0); } static void dtrace_enabling_retract(dtrace_state_t *state) { dtrace_enabling_t *enab, *next; ASSERT(MUTEX_HELD(&dtrace_lock)); /* * Iterate over all retained enablings, destroying those retained * for the specified state. */ for (enab = dtrace_retained; enab != NULL; enab = next) { next = enab->dten_next; /* * dtvs_state can only be NULL for helper enablings -- and * helper enablings can't be retained. */ ASSERT(enab->dten_vstate->dtvs_state != NULL); if (enab->dten_vstate->dtvs_state == state) { ASSERT(state->dts_nretained > 0); dtrace_enabling_destroy(enab); } } ASSERT(state->dts_nretained == 0); } static int dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) { int i = 0; int matched = 0; ASSERT(MUTEX_HELD(&cpu_lock)); ASSERT(MUTEX_HELD(&dtrace_lock)); for (i = 0; i < enab->dten_ndesc; i++) { dtrace_ecbdesc_t *ep = enab->dten_desc[i]; enab->dten_current = ep; enab->dten_error = 0; matched += dtrace_probe_enable(&ep->dted_probe, enab); if (enab->dten_error != 0) { /* * If we get an error half-way through enabling the * probes, we kick out -- perhaps with some number of * them enabled. Leaving enabled probes enabled may * be slightly confusing for user-level, but we expect * that no one will attempt to actually drive on in * the face of such errors. If this is an anonymous * enabling (indicated with a NULL nmatched pointer), * we cmn_err() a message. We aren't expecting to * get such an error -- to the extent that one can * occur at all, it would be the result of corrupted * DOF in the driver properties. */ if (nmatched == NULL) { cmn_err(CE_WARN, "dtrace_enabling_match() " "error on %p: %d", (void *)ep, enab->dten_error); } return (enab->dten_error); } } enab->dten_probegen = dtrace_probegen; if (nmatched != NULL) *nmatched = matched; return (0); } static void dtrace_enabling_matchall(void) { dtrace_enabling_t *enab; mutex_enter(&cpu_lock); mutex_enter(&dtrace_lock); /* * Iterate over all retained enablings to see if any probes match * against them. We only perform this operation on enablings for which * we have sufficient permissions by virtue of being in the global zone * or in the same zone as the DTrace client. Because we can be called * after dtrace_detach() has been called, we cannot assert that there * are retained enablings. We can safely load from dtrace_retained, * however: the taskq_destroy() at the end of dtrace_detach() will * block pending our completion.
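 * (Typical callers are paths on which new probes may just have appeared --
 * module load, for instance -- giving every retained enabling another
 * chance to match.)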
*/ for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { #ifdef illumos cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; if (INGLOBALZONE(curproc) || cr != NULL && getzoneid() == crgetzoneid(cr)) #endif (void) dtrace_enabling_match(enab, NULL); } mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); } /* * If an enabling is to be enabled without having matched probes (that is, if * dtrace_state_go() is to be called on the underlying dtrace_state_t), the * enabling must be _primed_ by creating an ECB for every ECB description. * This must be done to assure that we know the number of speculations, the * number of aggregations, the minimum buffer size needed, etc. before we * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually * enabling any probes, we create ECBs for every ECB description, but with a * NULL probe -- which is exactly what this function does. */ static void dtrace_enabling_prime(dtrace_state_t *state) { dtrace_enabling_t *enab; int i; for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { ASSERT(enab->dten_vstate->dtvs_state != NULL); if (enab->dten_vstate->dtvs_state != state) continue; /* * We don't want to prime an enabling more than once, lest * we allow a malicious user to induce resource exhaustion. * (The ECBs that result from priming an enabling aren't * leaked -- but they also aren't deallocated until the * consumer state is destroyed.) */ if (enab->dten_primed) continue; for (i = 0; i < enab->dten_ndesc; i++) { enab->dten_current = enab->dten_desc[i]; (void) dtrace_probe_enable(NULL, enab); } enab->dten_primed = 1; } } /* * Called to indicate that probes should be provided due to retained * enablings. This is implemented in terms of dtrace_probe_provide(), but it * must take an initial lap through the enabling calling the dtps_provide() * entry point explicitly to allow for autocreated probes. */ static void dtrace_enabling_provide(dtrace_provider_t *prv) { int i, all = 0; dtrace_probedesc_t desc; dtrace_genid_t gen; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(MUTEX_HELD(&dtrace_provider_lock)); if (prv == NULL) { all = 1; prv = dtrace_provider; } do { dtrace_enabling_t *enab; void *parg = prv->dtpv_arg; retry: gen = dtrace_retained_gen; for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { for (i = 0; i < enab->dten_ndesc; i++) { desc = enab->dten_desc[i]->dted_probe; mutex_exit(&dtrace_lock); prv->dtpv_pops.dtps_provide(parg, &desc); mutex_enter(&dtrace_lock); /* * Process the retained enablings again if * they have changed while we weren't holding * dtrace_lock. */ if (gen != dtrace_retained_gen) goto retry; } } } while (all && (prv = prv->dtpv_next) != NULL); mutex_exit(&dtrace_lock); dtrace_probe_provide(NULL, all ? NULL : prv); mutex_enter(&dtrace_lock); } /* * Called to reap ECBs that are attached to probes from defunct providers. */ static void dtrace_enabling_reap(void) { dtrace_provider_t *prov; dtrace_probe_t *probe; dtrace_ecb_t *ecb; hrtime_t when; int i; mutex_enter(&cpu_lock); mutex_enter(&dtrace_lock); for (i = 0; i < dtrace_nprobes; i++) { if ((probe = dtrace_probes[i]) == NULL) continue; if (probe->dtpr_ecb == NULL) continue; prov = probe->dtpr_provider; if ((when = prov->dtpv_defunct) == 0) continue; /* * We have ECBs on a defunct provider: we want to reap these * ECBs to allow the provider to unregister.
The destruction * of these ECBs must be done carefully: if we destroy the ECB * and the consumer later wishes to consume an EPID that * corresponds to the destroyed ECB (and if the EPID metadata * has not been previously consumed), the consumer will abort * processing on the unknown EPID. To reduce (but not, sadly, * eliminate) the possibility of this, we will only destroy an * ECB for a defunct provider if, for the state that * corresponds to the ECB: * * (a) There is no speculative tracing (which can effectively * cache an EPID for an arbitrary amount of time). * * (b) The principal buffers have been switched twice since the * provider became defunct. * * (c) The aggregation buffers are of zero size or have been * switched twice since the provider became defunct. * * We use dts_speculates to determine (a) and call a function * (dtrace_buffer_consumed()) to determine (b) and (c). Note * that as soon as we've been unable to destroy one of the ECBs * associated with the probe, we quit trying -- reaping is only * fruitful in as much as we can destroy all ECBs associated * with the defunct provider's probes. */ while ((ecb = probe->dtpr_ecb) != NULL) { dtrace_state_t *state = ecb->dte_state; dtrace_buffer_t *buf = state->dts_buffer; dtrace_buffer_t *aggbuf = state->dts_aggbuffer; if (state->dts_speculates) break; if (!dtrace_buffer_consumed(buf, when)) break; if (!dtrace_buffer_consumed(aggbuf, when)) break; dtrace_ecb_disable(ecb); ASSERT(probe->dtpr_ecb != ecb); dtrace_ecb_destroy(ecb); } } mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); } /* * DTrace DOF Functions */ /*ARGSUSED*/ static void dtrace_dof_error(dof_hdr_t *dof, const char *str) { if (dtrace_err_verbose) cmn_err(CE_WARN, "failed to process DOF: %s", str); #ifdef DTRACE_ERRDEBUG dtrace_errdebug(str); #endif } /* * Create DOF out of a currently enabled state. Right now, we only create * DOF containing the run-time options -- but this could be expanded to create * complete DOF representing the enabled state. */ static dof_hdr_t * dtrace_dof_create(dtrace_state_t *state) { dof_hdr_t *dof; dof_sec_t *sec; dof_optdesc_t *opt; int i, len = sizeof (dof_hdr_t) + roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + sizeof (dof_optdesc_t) * DTRACEOPT_MAX; ASSERT(MUTEX_HELD(&dtrace_lock)); dof = kmem_zalloc(len, KM_SLEEP); dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; dof->dofh_flags = 0; dof->dofh_hdrsize = sizeof (dof_hdr_t); dof->dofh_secsize = sizeof (dof_sec_t); dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ dof->dofh_secoff = sizeof (dof_hdr_t); dof->dofh_loadsz = len; dof->dofh_filesz = len; dof->dofh_pad = 0; /* * Fill in the option section header... 
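 * The generated DOF is laid out contiguously, mirroring the len
 * computation above (a sketch):
 *
 *	+-----------+---------------------------+--------------------------+
 *	| dof_hdr_t | dof_sec_t (DOF_SECT_      | dof_optdesc_t            |
 *	|           | OPTDESC), padded to 8     | [DTRACEOPT_MAX]          |
 *	+-----------+---------------------------+--------------------------+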
*/ sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); sec->dofs_type = DOF_SECT_OPTDESC; sec->dofs_align = sizeof (uint64_t); sec->dofs_flags = DOF_SECF_LOAD; sec->dofs_entsize = sizeof (dof_optdesc_t); opt = (dof_optdesc_t *)((uintptr_t)sec + roundup(sizeof (dof_sec_t), sizeof (uint64_t))); sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; for (i = 0; i < DTRACEOPT_MAX; i++) { opt[i].dofo_option = i; opt[i].dofo_strtab = DOF_SECIDX_NONE; opt[i].dofo_value = state->dts_options[i]; } return (dof); } static dof_hdr_t * dtrace_dof_copyin(uintptr_t uarg, int *errp) { dof_hdr_t hdr, *dof; ASSERT(!MUTEX_HELD(&dtrace_lock)); /* * First, we're going to copyin() the sizeof (dof_hdr_t). */ if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { dtrace_dof_error(NULL, "failed to copyin DOF header"); *errp = EFAULT; return (NULL); } /* * Now we'll allocate the entire DOF and copy it in -- provided * that the length isn't outrageous. */ if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { dtrace_dof_error(&hdr, "load size exceeds maximum"); *errp = E2BIG; return (NULL); } if (hdr.dofh_loadsz < sizeof (hdr)) { dtrace_dof_error(&hdr, "invalid load size"); *errp = EINVAL; return (NULL); } dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || dof->dofh_loadsz != hdr.dofh_loadsz) { kmem_free(dof, hdr.dofh_loadsz); *errp = EFAULT; return (NULL); } return (dof); } #ifdef __FreeBSD__ static dof_hdr_t * dtrace_dof_copyin_proc(struct proc *p, uintptr_t uarg, int *errp) { dof_hdr_t hdr, *dof; struct thread *td; size_t loadsz; ASSERT(!MUTEX_HELD(&dtrace_lock)); td = curthread; /* * First, we're going to copyin() the sizeof (dof_hdr_t). */ if (proc_readmem(td, p, uarg, &hdr, sizeof(hdr)) != sizeof(hdr)) { dtrace_dof_error(NULL, "failed to copyin DOF header"); *errp = EFAULT; return (NULL); } /* * Now we'll allocate the entire DOF and copy it in -- provided * that the length isn't outrageous. */ if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { dtrace_dof_error(&hdr, "load size exceeds maximum"); *errp = E2BIG; return (NULL); } loadsz = (size_t)hdr.dofh_loadsz; if (loadsz < sizeof (hdr)) { dtrace_dof_error(&hdr, "invalid load size"); *errp = EINVAL; return (NULL); } dof = kmem_alloc(loadsz, KM_SLEEP); if (proc_readmem(td, p, uarg, dof, loadsz) != loadsz || dof->dofh_loadsz != loadsz) { kmem_free(dof, hdr.dofh_loadsz); *errp = EFAULT; return (NULL); } return (dof); } static __inline uchar_t dtrace_dof_char(char c) { switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': return (c - '0'); case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': return (c - 'A' + 10); case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': return (c - 'a' + 10); } /* Should not reach here. */ return (UCHAR_MAX); } #endif /* __FreeBSD__ */ static dof_hdr_t * dtrace_dof_property(const char *name) { #ifdef __FreeBSD__ uint8_t *dofbuf; u_char *data, *eol; caddr_t doffile; size_t bytes, len, i; dof_hdr_t *dof; u_char c1, c2; dof = NULL; doffile = preload_search_by_type("dtrace_dof"); if (doffile == NULL) return (NULL); data = preload_fetch_addr(doffile); len = preload_fetch_size(doffile); for (;;) { /* Look for the end of the line. All lines end in a newline. 
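* Each preloaded line has the form "name=<hex>" -- e.g. "dof-data-0=7f444f46..." -- where every pair of ASCII hex digits encodes one byte of DOF (here, the 0x7f 'D' 'O' 'F' magic); dtrace_dof_char() maps one hex digit back to its value in the decode loop below.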
*/ eol = memchr(data, '\n', len); if (eol == NULL) return (NULL); if (strncmp(name, data, strlen(name)) == 0) break; eol++; /* skip past the newline */ len -= eol - data; data = eol; } /* We've found the data corresponding to the specified key. */ data += strlen(name) + 1; /* skip past the '=' */ len = eol - data; bytes = len / 2; if (bytes < sizeof(dof_hdr_t)) { dtrace_dof_error(NULL, "truncated header"); goto doferr; } /* * Each byte is represented by the two ASCII characters in its hex * representation. */ dofbuf = malloc(bytes, M_SOLARIS, M_WAITOK); for (i = 0; i < bytes; i++) { c1 = dtrace_dof_char(data[i * 2]); c2 = dtrace_dof_char(data[i * 2 + 1]); if (c1 == UCHAR_MAX || c2 == UCHAR_MAX) { dtrace_dof_error(NULL, "invalid hex char in DOF"); goto doferr; } dofbuf[i] = c1 * 16 + c2; } dof = (dof_hdr_t *)dofbuf; if (bytes < dof->dofh_loadsz) { dtrace_dof_error(NULL, "truncated DOF"); goto doferr; } if (dof->dofh_loadsz >= dtrace_dof_maxsize) { dtrace_dof_error(NULL, "oversized DOF"); goto doferr; } return (dof); doferr: free(dof, M_SOLARIS); return (NULL); #else /* __FreeBSD__ */ uchar_t *buf; uint64_t loadsz; unsigned int len, i; dof_hdr_t *dof; /* * Unfortunately, array of values in .conf files are always (and * only) interpreted to be integer arrays. We must read our DOF * as an integer array, and then squeeze it into a byte array. */ if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) return (NULL); for (i = 0; i < len; i++) buf[i] = (uchar_t)(((int *)buf)[i]); if (len < sizeof (dof_hdr_t)) { ddi_prop_free(buf); dtrace_dof_error(NULL, "truncated header"); return (NULL); } if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { ddi_prop_free(buf); dtrace_dof_error(NULL, "truncated DOF"); return (NULL); } if (loadsz >= dtrace_dof_maxsize) { ddi_prop_free(buf); dtrace_dof_error(NULL, "oversized DOF"); return (NULL); } dof = kmem_alloc(loadsz, KM_SLEEP); bcopy(buf, dof, loadsz); ddi_prop_free(buf); return (dof); #endif /* !__FreeBSD__ */ } static void dtrace_dof_destroy(dof_hdr_t *dof) { kmem_free(dof, dof->dofh_loadsz); } /* * Return the dof_sec_t pointer corresponding to a given section index. If the * index is not valid, dtrace_dof_error() is called and NULL is returned. If * a type other than DOF_SECT_NONE is specified, the header is checked against * this type and NULL is returned if the types do not match. 
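* (Callers therefore need only a single NULL check per lookup; see, e.g., the strtab lookup at the top of dtrace_dof_probedesc() below.)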
*/ static dof_sec_t * dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) { dof_sec_t *sec = (dof_sec_t *)(uintptr_t) ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); if (i >= dof->dofh_secnum) { dtrace_dof_error(dof, "referenced section index is invalid"); return (NULL); } if (!(sec->dofs_flags & DOF_SECF_LOAD)) { dtrace_dof_error(dof, "referenced section is not loadable"); return (NULL); } if (type != DOF_SECT_NONE && type != sec->dofs_type) { dtrace_dof_error(dof, "referenced section is the wrong type"); return (NULL); } return (sec); } static dtrace_probedesc_t * dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) { dof_probedesc_t *probe; dof_sec_t *strtab; uintptr_t daddr = (uintptr_t)dof; uintptr_t str; size_t size; if (sec->dofs_type != DOF_SECT_PROBEDESC) { dtrace_dof_error(dof, "invalid probe section"); return (NULL); } if (sec->dofs_align != sizeof (dof_secidx_t)) { dtrace_dof_error(dof, "bad alignment in probe description"); return (NULL); } if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { dtrace_dof_error(dof, "truncated probe description"); return (NULL); } probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); if (strtab == NULL) return (NULL); str = daddr + strtab->dofs_offset; size = strtab->dofs_size; if (probe->dofp_provider >= strtab->dofs_size) { dtrace_dof_error(dof, "corrupt probe provider"); return (NULL); } (void) strncpy(desc->dtpd_provider, (char *)(str + probe->dofp_provider), MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); if (probe->dofp_mod >= strtab->dofs_size) { dtrace_dof_error(dof, "corrupt probe module"); return (NULL); } (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); if (probe->dofp_func >= strtab->dofs_size) { dtrace_dof_error(dof, "corrupt probe function"); return (NULL); } (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); if (probe->dofp_name >= strtab->dofs_size) { dtrace_dof_error(dof, "corrupt probe name"); return (NULL); } (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); return (desc); } static dtrace_difo_t * dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, cred_t *cr) { dtrace_difo_t *dp; size_t ttl = 0; dof_difohdr_t *dofd; uintptr_t daddr = (uintptr_t)dof; size_t max = dtrace_difo_maxsize; int i, l, n; static const struct { int section; int bufoffs; int lenoffs; int entsize; int align; const char *msg; } difo[] = { { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), sizeof (dif_instr_t), "multiple DIF sections" }, { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), sizeof (uint64_t), "multiple integer tables" }, { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), offsetof(dtrace_difo_t, dtdo_strlen), 0, sizeof (char), "multiple string tables" }, { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), sizeof (uint_t), "multiple variable tables" }, { DOF_SECT_NONE, 0, 0, 0, 0, NULL } }; if (sec->dofs_type != DOF_SECT_DIFOHDR) { dtrace_dof_error(dof, "invalid DIFO header section"); return (NULL); } if (sec->dofs_align != sizeof (dof_secidx_t)) { dtrace_dof_error(dof, "bad 
alignment in DIFO header"); return (NULL); } if (sec->dofs_size < sizeof (dof_difohdr_t) || sec->dofs_size % sizeof (dof_secidx_t)) { dtrace_dof_error(dof, "bad size in DIFO header"); return (NULL); } dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); dp->dtdo_rtype = dofd->dofd_rtype; for (l = 0; l < n; l++) { dof_sec_t *subsec; void **bufp; uint32_t *lenp; if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, dofd->dofd_links[l])) == NULL) goto err; /* invalid section link */ if (ttl + subsec->dofs_size > max) { dtrace_dof_error(dof, "exceeds maximum size"); goto err; } ttl += subsec->dofs_size; for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { if (subsec->dofs_type != difo[i].section) continue; if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { dtrace_dof_error(dof, "section not loaded"); goto err; } if (subsec->dofs_align != difo[i].align) { dtrace_dof_error(dof, "bad alignment"); goto err; } bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); if (*bufp != NULL) { dtrace_dof_error(dof, difo[i].msg); goto err; } if (difo[i].entsize != subsec->dofs_entsize) { dtrace_dof_error(dof, "entry size mismatch"); goto err; } if (subsec->dofs_entsize != 0 && (subsec->dofs_size % subsec->dofs_entsize) != 0) { dtrace_dof_error(dof, "corrupt entry size"); goto err; } *lenp = subsec->dofs_size; *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), *bufp, subsec->dofs_size); if (subsec->dofs_entsize != 0) *lenp /= subsec->dofs_entsize; break; } /* * If we encounter a loadable DIFO sub-section that is not * known to us, assume this is a broken program and fail. */ if (difo[i].section == DOF_SECT_NONE && (subsec->dofs_flags & DOF_SECF_LOAD)) { dtrace_dof_error(dof, "unrecognized DIFO subsection"); goto err; } } if (dp->dtdo_buf == NULL) { /* * We can't have a DIF object without DIF text. */ dtrace_dof_error(dof, "missing DIF text"); goto err; } /* * Before we validate the DIF object, run through the variable table * looking for the strings -- if any of their size are under, we'll set * their size to be the system-wide default string size. Note that * this should _not_ happen if the "strsize" option has been set -- * in this case, the compiler should have set the size to reflect the * setting of the option. 
*/ for (i = 0; i < dp->dtdo_varlen; i++) { dtrace_difv_t *v = &dp->dtdo_vartab[i]; dtrace_diftype_t *t = &v->dtdv_type; if (v->dtdv_id < DIF_VAR_OTHER_UBASE) continue; if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) t->dtdt_size = dtrace_strsize_default; } if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) goto err; dtrace_difo_init(dp, vstate); return (dp); err: kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); kmem_free(dp, sizeof (dtrace_difo_t)); return (NULL); } static dtrace_predicate_t * dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, cred_t *cr) { dtrace_difo_t *dp; if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) return (NULL); return (dtrace_predicate_create(dp)); } static dtrace_actdesc_t * dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, cred_t *cr) { dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; dof_actdesc_t *desc; dof_sec_t *difosec; size_t offs; uintptr_t daddr = (uintptr_t)dof; uint64_t arg; dtrace_actkind_t kind; if (sec->dofs_type != DOF_SECT_ACTDESC) { dtrace_dof_error(dof, "invalid action section"); return (NULL); } if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { dtrace_dof_error(dof, "truncated action description"); return (NULL); } if (sec->dofs_align != sizeof (uint64_t)) { dtrace_dof_error(dof, "bad alignment in action description"); return (NULL); } if (sec->dofs_size < sec->dofs_entsize) { dtrace_dof_error(dof, "section entry size exceeds total size"); return (NULL); } if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { dtrace_dof_error(dof, "bad entry size in action description"); return (NULL); } if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); return (NULL); } for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { desc = (dof_actdesc_t *)(daddr + (uintptr_t)sec->dofs_offset + offs); kind = (dtrace_actkind_t)desc->dofa_kind; if ((DTRACEACT_ISPRINTFLIKE(kind) && (kind != DTRACEACT_PRINTA || desc->dofa_strtab != DOF_SECIDX_NONE)) || (kind == DTRACEACT_DIFEXPR && desc->dofa_strtab != DOF_SECIDX_NONE)) { dof_sec_t *strtab; char *str, *fmt; uint64_t i; /* * The argument to these actions is an index into the * DOF string table. For printf()-like actions, this * is the format string. For print(), this is the * CTF type of the expression result. 
*/ if ((strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) goto err; str = (char *)((uintptr_t)dof + (uintptr_t)strtab->dofs_offset); for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { if (str[i] == '\0') break; } if (i >= strtab->dofs_size) { dtrace_dof_error(dof, "bogus format string"); goto err; } if (i == desc->dofa_arg) { dtrace_dof_error(dof, "empty format string"); goto err; } i -= desc->dofa_arg; fmt = kmem_alloc(i + 1, KM_SLEEP); bcopy(&str[desc->dofa_arg], fmt, i + 1); arg = (uint64_t)(uintptr_t)fmt; } else { if (kind == DTRACEACT_PRINTA) { ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); arg = 0; } else { arg = desc->dofa_arg; } } act = dtrace_actdesc_create(kind, desc->dofa_ntuple, desc->dofa_uarg, arg); if (last != NULL) { last->dtad_next = act; } else { first = act; } last = act; if (desc->dofa_difo == DOF_SECIDX_NONE) continue; if ((difosec = dtrace_dof_sect(dof, DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) goto err; act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); if (act->dtad_difo == NULL) goto err; } ASSERT(first != NULL); return (first); err: for (act = first; act != NULL; act = next) { next = act->dtad_next; dtrace_actdesc_release(act, vstate); } return (NULL); } static dtrace_ecbdesc_t * dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, cred_t *cr) { dtrace_ecbdesc_t *ep; dof_ecbdesc_t *ecb; dtrace_probedesc_t *desc; dtrace_predicate_t *pred = NULL; if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { dtrace_dof_error(dof, "truncated ECB description"); return (NULL); } if (sec->dofs_align != sizeof (uint64_t)) { dtrace_dof_error(dof, "bad alignment in ECB description"); return (NULL); } ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); if (sec == NULL) return (NULL); ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); ep->dted_uarg = ecb->dofe_uarg; desc = &ep->dted_probe; if (dtrace_dof_probedesc(dof, sec, desc) == NULL) goto err; if (ecb->dofe_pred != DOF_SECIDX_NONE) { if ((sec = dtrace_dof_sect(dof, DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) goto err; if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) goto err; ep->dted_pred.dtpdd_predicate = pred; } if (ecb->dofe_actions != DOF_SECIDX_NONE) { if ((sec = dtrace_dof_sect(dof, DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) goto err; ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); if (ep->dted_action == NULL) goto err; } return (ep); err: if (pred != NULL) dtrace_predicate_release(pred, vstate); kmem_free(ep, sizeof (dtrace_ecbdesc_t)); return (NULL); } /* * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the * specified DOF. At present, this amounts to simply adding 'ubase' to the * site of any user SETX relocations to account for load object base address. * In the future, if we need other relocations, this function can be extended. 
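 * Concretely, for each dof_relodesc_t r of type DOF_RELO_SETX, the
 * 64-bit value at the relocation site is incremented in place:
 *
 *	*(uint64_t *)(daddr + ts->dofs_offset + r->dofr_offset) += ubase;
 *
 * turning a link-time constant into a run-time address.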
*/ static int dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) { uintptr_t daddr = (uintptr_t)dof; dof_relohdr_t *dofr = (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); dof_sec_t *ss, *rs, *ts; dof_relodesc_t *r; uint_t i, n; if (sec->dofs_size < sizeof (dof_relohdr_t) || sec->dofs_align != sizeof (dof_secidx_t)) { dtrace_dof_error(dof, "invalid relocation header"); return (-1); } ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); if (ss == NULL || rs == NULL || ts == NULL) return (-1); /* dtrace_dof_error() has been called already */ if (rs->dofs_entsize < sizeof (dof_relodesc_t) || rs->dofs_align != sizeof (uint64_t)) { dtrace_dof_error(dof, "invalid relocation section"); return (-1); } r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); n = rs->dofs_size / rs->dofs_entsize; for (i = 0; i < n; i++) { uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; switch (r->dofr_type) { case DOF_RELO_NONE: break; case DOF_RELO_SETX: if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + sizeof (uint64_t) > ts->dofs_size) { dtrace_dof_error(dof, "bad relocation offset"); return (-1); } if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { dtrace_dof_error(dof, "misaligned setx relo"); return (-1); } *(uint64_t *)taddr += ubase; break; default: dtrace_dof_error(dof, "invalid relocation type"); return (-1); } r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); } return (0); } /* * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated * header: it should be at the front of a memory region that is at least * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in * size. It need not be validated in any other way. */ static int dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) { uint64_t len = dof->dofh_loadsz, seclen; uintptr_t daddr = (uintptr_t)dof; dtrace_ecbdesc_t *ep; dtrace_enabling_t *enab; uint_t i; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); /* * Check the DOF header identification bytes. In addition to checking * valid settings, we also verify that unused bits/bytes are zeroed so * we can use them later without fear of regressing existing binaries. 
*/ if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { dtrace_dof_error(dof, "DOF magic string mismatch"); return (-1); } if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { dtrace_dof_error(dof, "DOF has invalid data model"); return (-1); } if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { dtrace_dof_error(dof, "DOF encoding mismatch"); return (-1); } if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { dtrace_dof_error(dof, "DOF version mismatch"); return (-1); } if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { dtrace_dof_error(dof, "DOF uses unsupported instruction set"); return (-1); } if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { dtrace_dof_error(dof, "DOF uses too many integer registers"); return (-1); } if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { dtrace_dof_error(dof, "DOF uses too many tuple registers"); return (-1); } for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { if (dof->dofh_ident[i] != 0) { dtrace_dof_error(dof, "DOF has invalid ident byte set"); return (-1); } } if (dof->dofh_flags & ~DOF_FL_VALID) { dtrace_dof_error(dof, "DOF has invalid flag bits set"); return (-1); } if (dof->dofh_secsize == 0) { dtrace_dof_error(dof, "zero section header size"); return (-1); } /* * Check that the section headers don't exceed the amount of DOF * data. Note that we cast the section size and number of sections * to uint64_t's to prevent possible overflow in the multiplication. */ seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; if (dof->dofh_secoff > len || seclen > len || dof->dofh_secoff + seclen > len) { dtrace_dof_error(dof, "truncated section headers"); return (-1); } if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { dtrace_dof_error(dof, "misaligned section headers"); return (-1); } if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { dtrace_dof_error(dof, "misaligned section size"); return (-1); } /* * Take an initial pass through the section headers to be sure that * the headers don't have stray offsets. If the 'noprobes' flag is * set, do not permit sections relating to providers, probes, or args. */ for (i = 0; i < dof->dofh_secnum; i++) { dof_sec_t *sec = (dof_sec_t *)(daddr + (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); if (noprobes) { switch (sec->dofs_type) { case DOF_SECT_PROVIDER: case DOF_SECT_PROBES: case DOF_SECT_PRARGS: case DOF_SECT_PROFFS: dtrace_dof_error(dof, "illegal sections " "for enabling"); return (-1); } } if (DOF_SEC_ISLOADABLE(sec->dofs_type) && !(sec->dofs_flags & DOF_SECF_LOAD)) { dtrace_dof_error(dof, "loadable section with load " "flag unset"); return (-1); } if (!(sec->dofs_flags & DOF_SECF_LOAD)) continue; /* just ignore non-loadable sections */ if (!ISP2(sec->dofs_align)) { dtrace_dof_error(dof, "bad section alignment"); return (-1); } if (sec->dofs_offset & (sec->dofs_align - 1)) { dtrace_dof_error(dof, "misaligned section"); return (-1); } if (sec->dofs_offset > len || sec->dofs_size > len || sec->dofs_offset + sec->dofs_size > len) { dtrace_dof_error(dof, "corrupt section header"); return (-1); } if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + sec->dofs_offset + sec->dofs_size - 1) != '\0') { dtrace_dof_error(dof, "non-terminating string table"); return (-1); } } /* * Take a second pass through the sections and locate and perform any * relocations that are present. 
We do this after the first pass to * be sure that all sections have had their headers validated. */ for (i = 0; i < dof->dofh_secnum; i++) { dof_sec_t *sec = (dof_sec_t *)(daddr + (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); if (!(sec->dofs_flags & DOF_SECF_LOAD)) continue; /* skip sections that are not loadable */ switch (sec->dofs_type) { case DOF_SECT_URELHDR: if (dtrace_dof_relocate(dof, sec, ubase) != 0) return (-1); break; } } if ((enab = *enabp) == NULL) enab = *enabp = dtrace_enabling_create(vstate); for (i = 0; i < dof->dofh_secnum; i++) { dof_sec_t *sec = (dof_sec_t *)(daddr + (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); if (sec->dofs_type != DOF_SECT_ECBDESC) continue; if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { dtrace_enabling_destroy(enab); *enabp = NULL; return (-1); } dtrace_enabling_add(enab, ep); } return (0); } /* * Process DOF for any options. This routine assumes that the DOF has been * at least processed by dtrace_dof_slurp(). */ static int dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) { int i, rval; uint32_t entsize; size_t offs; dof_optdesc_t *desc; for (i = 0; i < dof->dofh_secnum; i++) { dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); if (sec->dofs_type != DOF_SECT_OPTDESC) continue; if (sec->dofs_align != sizeof (uint64_t)) { dtrace_dof_error(dof, "bad alignment in " "option description"); return (EINVAL); } if ((entsize = sec->dofs_entsize) == 0) { dtrace_dof_error(dof, "zeroed option entry size"); return (EINVAL); } if (entsize < sizeof (dof_optdesc_t)) { dtrace_dof_error(dof, "bad option entry size"); return (EINVAL); } for (offs = 0; offs < sec->dofs_size; offs += entsize) { desc = (dof_optdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset + offs); if (desc->dofo_strtab != DOF_SECIDX_NONE) { dtrace_dof_error(dof, "non-zero option string"); return (EINVAL); } if (desc->dofo_value == DTRACEOPT_UNSET) { dtrace_dof_error(dof, "unset option"); return (EINVAL); } if ((rval = dtrace_state_option(state, desc->dofo_option, desc->dofo_value)) != 0) { dtrace_dof_error(dof, "rejected option"); return (rval); } } } return (0); } /* * DTrace Consumer State Functions */ static int dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) { size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; void *base; uintptr_t limit; dtrace_dynvar_t *dvar, *next, *start; int i; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); bzero(dstate, sizeof (dtrace_dstate_t)); if ((dstate->dtds_chunksize = chunksize) == 0) dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; VERIFY(dstate->dtds_chunksize < LONG_MAX); if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) size = min; if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL) return (ENOMEM); dstate->dtds_size = size; dstate->dtds_base = base; dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); if (hashsize != 1 && (hashsize & 1)) hashsize--; dstate->dtds_hashsize = hashsize; dstate->dtds_hash = dstate->dtds_base; /* * Set all of our hash buckets to point to the single sink, and (if * it hasn't already been set), set the sink's hash value to be the * sink sentinel value. The sink is needed for dynamic variable * lookups to know that they have iterated over an entire, valid hash * chain. 
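 * The dynamic variable space is thus carved up as follows (a sketch
 * of the code below):
 *
 *	base                                                  base + size
 *	| dtrace_dynhash_t[hashsize] | per-CPU free lists of chunks ... |
 *
 * with each CPU receiving maxper bytes' worth of chunksize-sized
 * chunks and the final CPU absorbing any remainder.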
*/ for (i = 0; i < hashsize; i++) dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; /* * Determine number of active CPUs. Divide free list evenly among * active CPUs. */ start = (dtrace_dynvar_t *) ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); limit = (uintptr_t)base + size; VERIFY((uintptr_t)start < limit); VERIFY((uintptr_t)start >= (uintptr_t)base); maxper = (limit - (uintptr_t)start) / NCPU; maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; #ifndef illumos CPU_FOREACH(i) { #else for (i = 0; i < NCPU; i++) { #endif dstate->dtds_percpu[i].dtdsc_free = dvar = start; /* * If we don't even have enough chunks to make it once through * NCPUs, we're just going to allocate everything to the first * CPU. And if we're on the last CPU, we're going to allocate * whatever is left over. In either case, we set the limit to * be the limit of the dynamic variable space. */ if (maxper == 0 || i == NCPU - 1) { limit = (uintptr_t)base + size; start = NULL; } else { limit = (uintptr_t)start + maxper; start = (dtrace_dynvar_t *)limit; } VERIFY(limit <= (uintptr_t)base + size); for (;;) { next = (dtrace_dynvar_t *)((uintptr_t)dvar + dstate->dtds_chunksize); if ((uintptr_t)next + dstate->dtds_chunksize >= limit) break; VERIFY((uintptr_t)dvar >= (uintptr_t)base && (uintptr_t)dvar <= (uintptr_t)base + size); dvar->dtdv_next = next; dvar = next; } if (maxper == 0) break; } return (0); } static void dtrace_dstate_fini(dtrace_dstate_t *dstate) { ASSERT(MUTEX_HELD(&cpu_lock)); if (dstate->dtds_base == NULL) return; kmem_free(dstate->dtds_base, dstate->dtds_size); kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); } static void dtrace_vstate_fini(dtrace_vstate_t *vstate) { /* * Logical XOR, where are you? */ ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); if (vstate->dtvs_nglobals > 0) { kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * sizeof (dtrace_statvar_t *)); } if (vstate->dtvs_ntlocals > 0) { kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * sizeof (dtrace_difv_t)); } ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); if (vstate->dtvs_nlocals > 0) { kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * sizeof (dtrace_statvar_t *)); } } #ifdef illumos static void dtrace_state_clean(dtrace_state_t *state) { if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) return; dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); dtrace_speculation_clean(state); } static void dtrace_state_deadman(dtrace_state_t *state) { hrtime_t now; dtrace_sync(); now = dtrace_gethrtime(); if (state != dtrace_anon.dta_state && now - state->dts_laststatus >= dtrace_deadman_user) return; /* * We must be sure that dts_alive never appears to be less than the * value upon entry to dtrace_state_deadman(), and because we lack a * dtrace_cas64(), we cannot store to it atomically. We thus instead * store INT64_MAX to it, followed by a memory barrier, followed by * the new value. This assures that dts_alive never appears to be * less than its true value, regardless of the order in which the * stores to the underlying storage are issued. 
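* (On a 32-bit kernel, for instance, the two halves of the 64-bit store may commit in either order; staging INT64_MAX first assures that any torn value a racing reader observes is no less than the previous timestamp.)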
*/ state->dts_alive = INT64_MAX; dtrace_membar_producer(); state->dts_alive = now; } #else /* !illumos */ static void dtrace_state_clean(void *arg) { dtrace_state_t *state = arg; dtrace_optval_t *opt = state->dts_options; if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) return; dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); dtrace_speculation_clean(state); callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, dtrace_state_clean, state); } static void dtrace_state_deadman(void *arg) { dtrace_state_t *state = arg; hrtime_t now; dtrace_sync(); dtrace_debug_output(); now = dtrace_gethrtime(); if (state != dtrace_anon.dta_state && now - state->dts_laststatus >= dtrace_deadman_user) return; /* * We must be sure that dts_alive never appears to be less than the * value upon entry to dtrace_state_deadman(), and because we lack a * dtrace_cas64(), we cannot store to it atomically. We thus instead * store INT64_MAX to it, followed by a memory barrier, followed by * the new value. This assures that dts_alive never appears to be * less than its true value, regardless of the order in which the * stores to the underlying storage are issued. */ state->dts_alive = INT64_MAX; dtrace_membar_producer(); state->dts_alive = now; callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, dtrace_state_deadman, state); } #endif /* illumos */ static dtrace_state_t * #ifdef illumos dtrace_state_create(dev_t *devp, cred_t *cr) #else dtrace_state_create(struct cdev *dev, struct ucred *cred __unused) #endif { #ifdef illumos minor_t minor; major_t major; #else cred_t *cr = NULL; int m = 0; #endif char c[30]; dtrace_state_t *state; dtrace_optval_t *opt; int bufsize = NCPU * sizeof (dtrace_buffer_t), i; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(MUTEX_HELD(&cpu_lock)); #ifdef illumos minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, VM_BESTFIT | VM_SLEEP); if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); return (NULL); } state = ddi_get_soft_state(dtrace_softstate, minor); #else if (dev != NULL) { cr = dev->si_cred; m = dev2unit(dev); } /* Allocate memory for the state. */ state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); #endif state->dts_epid = DTRACE_EPIDNONE + 1; (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); #ifdef illumos state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); if (devp != NULL) { major = getemajor(*devp); } else { major = ddi_driver_major(dtrace_devi); } state->dts_dev = makedevice(major, minor); if (devp != NULL) *devp = state->dts_dev; #else state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); state->dts_dev = dev; #endif /* * We allocate NCPU buffers. On the one hand, this can be quite * a bit of memory per instance (nearly 36K on a Starcat). On the * other hand, it saves an additional memory reference in the probe * path. */ state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); #ifdef illumos state->dts_cleaner = CYCLIC_NONE; state->dts_deadman = CYCLIC_NONE; #else callout_init(&state->dts_cleaner, 1); callout_init(&state->dts_deadman, 1); #endif state->dts_vstate.dtvs_state = state; for (i = 0; i < DTRACEOPT_MAX; i++) state->dts_options[i] = DTRACEOPT_UNSET; /* * Set the default options. 
*/ opt = state->dts_options; opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; state->dts_activity = DTRACE_ACTIVITY_INACTIVE; /* * Depending on the user credentials, we set flag bits which alter probe * visibility or the amount of destructiveness allowed. In the case of * actual anonymous tracing, or the possession of all privileges, all of * the normal checks are bypassed. */ if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { state->dts_cred.dcr_visible = DTRACE_CRV_ALL; state->dts_cred.dcr_action = DTRACE_CRA_ALL; } else { /* * Set up the credentials for this instantiation. We take a * hold on the credential to prevent it from disappearing on * us; this in turn prevents the zone_t referenced by this * credential from disappearing. This means that we can * examine the credential and the zone from probe context. */ crhold(cr); state->dts_cred.dcr_cred = cr; /* * CRA_PROC means "we have *some* privilege for dtrace" and * unlocks the use of variables like pid, zonename, etc. */ if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { state->dts_cred.dcr_action |= DTRACE_CRA_PROC; } /* * dtrace_user allows use of syscall and profile providers. * If the user also has proc_owner and/or proc_zone, we * extend the scope to include additional visibility and * destructive power. */ if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { state->dts_cred.dcr_visible |= DTRACE_CRV_ALLPROC; state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; } if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { state->dts_cred.dcr_visible |= DTRACE_CRV_ALLZONE; state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; } /* * If we have all privs in whatever zone this is, * we can do destructive things to processes which * have altered credentials. */ #ifdef illumos if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), cr->cr_zone->zone_privset)) { state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; } #endif } /* * Holding the dtrace_kernel privilege also implies that * the user has the dtrace_user privilege from a visibility * perspective. But without further privileges, some * destructive actions are not available. */ if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { /* * Make all probes in all zones visible. However, * this doesn't mean that all actions become available * to all zones. */ state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | DTRACE_CRA_PROC; /* * Holding proc_owner means that destructive actions * for *this* zone are allowed. 
*/ if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; /* * Holding proc_zone means that destructive actions * for this user/group ID in all zones are allowed. */ if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; #ifdef illumos /* * If we have all privs in whatever zone this is, * we can do destructive things to processes which * have altered credentials. */ if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), cr->cr_zone->zone_privset)) { state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; } #endif } /* * Holding the dtrace_proc privilege gives control over fasttrap * and pid providers. We need to grant wider destructive * privileges in the event that the user has proc_owner and/or * proc_zone. */ if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) state->dts_cred.dcr_action |= DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; } } return (state); } static int dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) { dtrace_optval_t *opt = state->dts_options, size; processorid_t cpu = 0; int flags = 0, rval, factor, divisor = 1; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(MUTEX_HELD(&cpu_lock)); ASSERT(which < DTRACEOPT_MAX); ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || (state == dtrace_anon.dta_state && state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) return (0); if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) cpu = opt[DTRACEOPT_CPU]; if (which == DTRACEOPT_SPECSIZE) flags |= DTRACEBUF_NOSWITCH; if (which == DTRACEOPT_BUFSIZE) { if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) flags |= DTRACEBUF_RING; if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) flags |= DTRACEBUF_FILL; if (state != dtrace_anon.dta_state || state->dts_activity != DTRACE_ACTIVITY_ACTIVE) flags |= DTRACEBUF_INACTIVE; } for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) { /* * The size must be 8-byte aligned. If the size is not 8-byte * aligned, drop it down by the difference. */ if (size & (sizeof (uint64_t) - 1)) size -= size & (sizeof (uint64_t) - 1); if (size < state->dts_reserve) { /* * Buffers must always be large enough to accommodate * their prereserved space. We return E2BIG instead * of ENOMEM in this case to allow for user-level * software to differentiate the cases.
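* (On ENOMEM, the loop below retries with the size divided by the smallest power of two that is not less than the factor reported by dtrace_buffer_alloc() -- but only under the "auto" buffer-resize policy; under "manual", the first failure is returned as-is.)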
*/ return (E2BIG); } rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor); if (rval != ENOMEM) { opt[which] = size; return (rval); } if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) return (rval); for (divisor = 2; divisor < factor; divisor <<= 1) continue; } return (ENOMEM); } static int dtrace_state_buffers(dtrace_state_t *state) { dtrace_speculation_t *spec = state->dts_speculations; int rval, i; if ((rval = dtrace_state_buffer(state, state->dts_buffer, DTRACEOPT_BUFSIZE)) != 0) return (rval); if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, DTRACEOPT_AGGSIZE)) != 0) return (rval); for (i = 0; i < state->dts_nspeculations; i++) { if ((rval = dtrace_state_buffer(state, spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) return (rval); } return (0); } static void dtrace_state_prereserve(dtrace_state_t *state) { dtrace_ecb_t *ecb; dtrace_probe_t *probe; state->dts_reserve = 0; if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) return; /* * If our buffer policy is a "fill" buffer policy, we need to set the * prereserved space to be the space required by the END probes. */ probe = dtrace_probes[dtrace_probeid_end - 1]; ASSERT(probe != NULL); for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { if (ecb->dte_state != state) continue; state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; } } static int dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) { dtrace_optval_t *opt = state->dts_options, sz, nspec; dtrace_speculation_t *spec; dtrace_buffer_t *buf; #ifdef illumos cyc_handler_t hdlr; cyc_time_t when; #endif int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); dtrace_icookie_t cookie; mutex_enter(&cpu_lock); mutex_enter(&dtrace_lock); if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { rval = EBUSY; goto out; } /* * Before we can perform any checks, we must prime all of the * retained enablings that correspond to this state. */ dtrace_enabling_prime(state); if (state->dts_destructive && !state->dts_cred.dcr_destructive) { rval = EACCES; goto out; } dtrace_state_prereserve(state); /* * What we want to do now is try to allocate our speculations. * We do not automatically resize the number of speculations; if * this fails, we will fail the operation. */ nspec = opt[DTRACEOPT_NSPEC]; ASSERT(nspec != DTRACEOPT_UNSET); if (nspec > INT_MAX) { rval = ENOMEM; goto out; } spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP | KM_NORMALPRI); if (spec == NULL) { rval = ENOMEM; goto out; } state->dts_speculations = spec; state->dts_nspeculations = (int)nspec; for (i = 0; i < nspec; i++) { if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP | KM_NORMALPRI)) == NULL) { rval = ENOMEM; goto err; } spec[i].dtsp_buffer = buf; } if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { if (dtrace_anon.dta_state == NULL) { rval = ENOENT; goto out; } if (state->dts_necbs != 0) { rval = EALREADY; goto out; } state->dts_anon = dtrace_anon_grab(); ASSERT(state->dts_anon != NULL); state = state->dts_anon; /* * We want "grabanon" to be set in the grabbed state, so we'll * copy that option value from the grabbing state into the * grabbed state. */ state->dts_options[DTRACEOPT_GRABANON] = opt[DTRACEOPT_GRABANON]; *cpu = dtrace_anon.dta_beganon; /* * If the anonymous state is active (as it almost certainly * is if the anonymous enabling ultimately matched anything), * we don't allow any further option processing -- but we * don't return failure.
*/ if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) goto out; } if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && opt[DTRACEOPT_AGGSIZE] != 0) { if (state->dts_aggregations == NULL) { /* * We're not going to create an aggregation buffer * because we don't have any ECBs that contain * aggregations -- set this option to 0. */ opt[DTRACEOPT_AGGSIZE] = 0; } else { /* * If we have an aggregation buffer, we must also have * a buffer to use as scratch. */ if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { opt[DTRACEOPT_BUFSIZE] = state->dts_needed; } } } if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && opt[DTRACEOPT_SPECSIZE] != 0) { if (!state->dts_speculates) { /* * We're not going to create speculation buffers * because we don't have any ECBs that actually * speculate -- set the speculation size to 0. */ opt[DTRACEOPT_SPECSIZE] = 0; } } /* * The bare minimum size for any buffer that we're actually going to * do anything to is sizeof (uint64_t). */ sz = sizeof (uint64_t); if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { /* * A buffer size has been explicitly set to 0 (or to a size * that will be adjusted to 0) and we need the space -- we * need to return failure. We return ENOSPC to differentiate * it from failing to allocate a buffer due to failure to meet * the reserve (for which we return E2BIG). */ rval = ENOSPC; goto out; } if ((rval = dtrace_state_buffers(state)) != 0) goto err; if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) sz = dtrace_dstate_defsize; do { rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); if (rval == 0) break; if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) goto err; } while (sz >>= 1); opt[DTRACEOPT_DYNVARSIZE] = sz; if (rval != 0) goto err; if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; if (opt[DTRACEOPT_CLEANRATE] == 0) opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); #ifdef illumos hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; hdlr.cyh_arg = state; hdlr.cyh_level = CY_LOW_LEVEL; when.cyt_when = 0; when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; state->dts_cleaner = cyclic_add(&hdlr, &when); hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; hdlr.cyh_arg = state; hdlr.cyh_level = CY_LOW_LEVEL; when.cyt_when = 0; when.cyt_interval = dtrace_deadman_interval; state->dts_deadman = cyclic_add(&hdlr, &when); #else callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, dtrace_state_clean, state); callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, dtrace_state_deadman, state); #endif state->dts_activity = DTRACE_ACTIVITY_WARMUP; #ifdef illumos if (state->dts_getf != 0 && !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { /* * We don't have kernel privs but we have at least one call * to getf(); we need to bump our zone's count, and (if * this is the first enabling to have an unprivileged call * to getf()) we need to hook into closef(). 
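* (dtrace_getf counts such enablings globally: the hook is installed on the 0 -> 1 transition here and cleared again on the 1 -> 0 transition in dtrace_state_stop().)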
*/ state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++; if (dtrace_getf++ == 0) { ASSERT(dtrace_closef == NULL); dtrace_closef = dtrace_getf_barrier; } } #endif /* * Now it's time to actually fire the BEGIN probe. We need to disable * interrupts here both to record the CPU on which we fired the BEGIN * probe (the data from this CPU will be processed first at user * level) and to manually activate the buffer for this CPU. */ cookie = dtrace_interrupt_disable(); *cpu = curcpu; ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; dtrace_probe(dtrace_probeid_begin, (uint64_t)(uintptr_t)state, 0, 0, 0, 0); dtrace_interrupt_enable(cookie); /* * We may have had an exit action from a BEGIN probe; only change our * state to ACTIVE if we're still in WARMUP. */ ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || state->dts_activity == DTRACE_ACTIVITY_DRAINING); if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) state->dts_activity = DTRACE_ACTIVITY_ACTIVE; #ifdef __FreeBSD__ /* * We enable anonymous tracing before APs are started, so we must * activate buffers using the current CPU. */ if (state == dtrace_anon.dta_state) for (int i = 0; i < NCPU; i++) dtrace_buffer_activate_cpu(state, i); else dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_buffer_activate, state); #else /* * Regardless of whether or not now we're in ACTIVE or DRAINING, we * want each CPU to transition its principal buffer out of the * INACTIVE state. Doing this assures that no CPU will suddenly begin * processing an ECB halfway down a probe's ECB chain; all CPUs will * atomically transition from processing none of a state's ECBs to * processing all of them. */ dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_buffer_activate, state); #endif goto out; err: dtrace_buffer_free(state->dts_buffer); dtrace_buffer_free(state->dts_aggbuffer); if ((nspec = state->dts_nspeculations) == 0) { ASSERT(state->dts_speculations == NULL); goto out; } spec = state->dts_speculations; ASSERT(spec != NULL); for (i = 0; i < state->dts_nspeculations; i++) { if ((buf = spec[i].dtsp_buffer) == NULL) break; dtrace_buffer_free(buf); kmem_free(buf, bufsize); } kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); state->dts_nspeculations = 0; state->dts_speculations = NULL; out: mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); return (rval); } static int dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) { dtrace_icookie_t cookie; ASSERT(MUTEX_HELD(&dtrace_lock)); if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && state->dts_activity != DTRACE_ACTIVITY_DRAINING) return (EINVAL); /* * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync * to be sure that every CPU has seen it. See below for the details * on why this is done. */ state->dts_activity = DTRACE_ACTIVITY_DRAINING; dtrace_sync(); /* * By this point, it is impossible for any CPU to be still processing * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN * iff we're in the END probe. */ state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; dtrace_sync(); ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); /* * Finally, we can release the reserve and call the END probe. 
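* (This reserve is the space that dtrace_state_prereserve() set aside so that a "fill" buffer always retains room for the END records.)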
We * disable interrupts across calling the END probe to allow us to * return the CPU on which we actually called the END probe. This * allows user-land to be sure that this CPU's principal buffer is * processed last. */ state->dts_reserve = 0; cookie = dtrace_interrupt_disable(); *cpu = curcpu; dtrace_probe(dtrace_probeid_end, (uint64_t)(uintptr_t)state, 0, 0, 0, 0); dtrace_interrupt_enable(cookie); state->dts_activity = DTRACE_ACTIVITY_STOPPED; dtrace_sync(); #ifdef illumos if (state->dts_getf != 0 && !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) { /* * We don't have kernel privs but we have at least one call * to getf(); we need to lower our zone's count, and (if * this is the last enabling to have an unprivileged call * to getf()) we need to clear the closef() hook. */ ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0); ASSERT(dtrace_closef == dtrace_getf_barrier); ASSERT(dtrace_getf > 0); state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--; if (--dtrace_getf == 0) dtrace_closef = NULL; } #endif return (0); } static int dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, dtrace_optval_t val) { ASSERT(MUTEX_HELD(&dtrace_lock)); if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) return (EBUSY); if (option >= DTRACEOPT_MAX) return (EINVAL); if (option != DTRACEOPT_CPU && val < 0) return (EINVAL); switch (option) { case DTRACEOPT_DESTRUCTIVE: if (dtrace_destructive_disallow) return (EACCES); state->dts_cred.dcr_destructive = 1; break; case DTRACEOPT_BUFSIZE: case DTRACEOPT_DYNVARSIZE: case DTRACEOPT_AGGSIZE: case DTRACEOPT_SPECSIZE: case DTRACEOPT_STRSIZE: if (val < 0) return (EINVAL); if (val >= LONG_MAX) { /* * If this is an otherwise negative value, set it to * the highest multiple of 128m less than LONG_MAX. * Technically, we're adjusting the size without * regard to the buffer resizing policy, but in fact, * this has no effect -- if we set the buffer size to * ~LONG_MAX and the buffer policy is ultimately set to * be "manual", the buffer allocation is guaranteed to * fail, if only because the allocation requires two * buffers. (We set the size to the highest * multiple of 128m because it ensures that the size * will remain a multiple of a megabyte when * repeatedly halved -- all the way down to 15m.) */ val = LONG_MAX - (1 << 27) + 1; } } state->dts_options[option] = val; return (0); } static void dtrace_state_destroy(dtrace_state_t *state) { dtrace_ecb_t *ecb; dtrace_vstate_t *vstate = &state->dts_vstate; #ifdef illumos minor_t minor = getminor(state->dts_dev); #endif int i, bufsize = NCPU * sizeof (dtrace_buffer_t); dtrace_speculation_t *spec = state->dts_speculations; int nspec = state->dts_nspeculations; uint32_t match; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(MUTEX_HELD(&cpu_lock)); /* * First, retract any retained enablings for this state. */ dtrace_enabling_retract(state); ASSERT(state->dts_nretained == 0); if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || state->dts_activity == DTRACE_ACTIVITY_DRAINING) { /* * We have managed to come into dtrace_state_destroy() on a * hot enabling -- almost certainly because of a disorderly * shutdown of a consumer. (That is, a consumer that is * exiting without having called dtrace_stop().) In this case, * we're going to set our activity to be KILLED, and then * issue a sync to be sure that everyone is out of probe * context before we start blowing away ECBs. */ state->dts_activity = DTRACE_ACTIVITY_KILLED; dtrace_sync(); } /* * Release the credential hold we took in dtrace_state_create().
*/ if (state->dts_cred.dcr_cred != NULL) crfree(state->dts_cred.dcr_cred); /* * Now we can safely disable and destroy any enabled probes. Because * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress * (especially if they're all enabled), we take two passes through the * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and * in the second we disable whatever is left over. */ for (match = DTRACE_PRIV_KERNEL; ; match = 0) { for (i = 0; i < state->dts_necbs; i++) { if ((ecb = state->dts_ecbs[i]) == NULL) continue; if (match && ecb->dte_probe != NULL) { dtrace_probe_t *probe = ecb->dte_probe; dtrace_provider_t *prov = probe->dtpr_provider; if (!(prov->dtpv_priv.dtpp_flags & match)) continue; } dtrace_ecb_disable(ecb); dtrace_ecb_destroy(ecb); } if (!match) break; } /* * Before we free the buffers, perform one more sync to assure that * every CPU is out of probe context. */ dtrace_sync(); dtrace_buffer_free(state->dts_buffer); dtrace_buffer_free(state->dts_aggbuffer); for (i = 0; i < nspec; i++) dtrace_buffer_free(spec[i].dtsp_buffer); #ifdef illumos if (state->dts_cleaner != CYCLIC_NONE) cyclic_remove(state->dts_cleaner); if (state->dts_deadman != CYCLIC_NONE) cyclic_remove(state->dts_deadman); #else callout_stop(&state->dts_cleaner); callout_drain(&state->dts_cleaner); callout_stop(&state->dts_deadman); callout_drain(&state->dts_deadman); #endif dtrace_dstate_fini(&vstate->dtvs_dynvars); dtrace_vstate_fini(vstate); if (state->dts_ecbs != NULL) kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); if (state->dts_aggregations != NULL) { #ifdef DEBUG for (i = 0; i < state->dts_naggregations; i++) ASSERT(state->dts_aggregations[i] == NULL); #endif ASSERT(state->dts_naggregations > 0); kmem_free(state->dts_aggregations, state->dts_naggregations * sizeof (dtrace_aggregation_t *)); } kmem_free(state->dts_buffer, bufsize); kmem_free(state->dts_aggbuffer, bufsize); for (i = 0; i < nspec; i++) kmem_free(spec[i].dtsp_buffer, bufsize); if (spec != NULL) kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); dtrace_format_destroy(state); if (state->dts_aggid_arena != NULL) { #ifdef illumos vmem_destroy(state->dts_aggid_arena); #else delete_unrhdr(state->dts_aggid_arena); #endif state->dts_aggid_arena = NULL; } #ifdef illumos ddi_soft_state_free(dtrace_softstate, minor); vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); #endif } /* * DTrace Anonymous Enabling Functions */ static dtrace_state_t * dtrace_anon_grab(void) { dtrace_state_t *state; ASSERT(MUTEX_HELD(&dtrace_lock)); if ((state = dtrace_anon.dta_state) == NULL) { ASSERT(dtrace_anon.dta_enabling == NULL); return (NULL); } ASSERT(dtrace_anon.dta_enabling != NULL); ASSERT(dtrace_retained != NULL); dtrace_enabling_destroy(dtrace_anon.dta_enabling); dtrace_anon.dta_enabling = NULL; dtrace_anon.dta_state = NULL; return (state); } static void dtrace_anon_property(void) { int i, rv; dtrace_state_t *state; dof_hdr_t *dof; char c[32]; /* enough for "dof-data-" + digits */ ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(MUTEX_HELD(&cpu_lock)); for (i = 0; ; i++) { (void) snprintf(c, sizeof (c), "dof-data-%d", i); dtrace_err_verbose = 1; if ((dof = dtrace_dof_property(c)) == NULL) { dtrace_err_verbose = 0; break; } #ifdef illumos /* * We want to create anonymous state, so we need to transition * the kernel debugger to indicate that DTrace is active. If * this fails (e.g. because the debugger has modified text in * some way), we won't continue with the processing. 
*/ if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { cmn_err(CE_NOTE, "kernel debugger active; anonymous " "enabling ignored."); dtrace_dof_destroy(dof); break; } #endif /* * If we haven't allocated an anonymous state, we'll do so now. */ if ((state = dtrace_anon.dta_state) == NULL) { state = dtrace_state_create(NULL, NULL); dtrace_anon.dta_state = state; if (state == NULL) { /* * This basically shouldn't happen: the only * failure mode from dtrace_state_create() is a * failure of ddi_soft_state_zalloc() that * itself should never happen. Still, the * interface allows for a failure mode, and * we want to fail as gracefully as possible: * we'll emit an error message and cease * processing anonymous state in this case. */ cmn_err(CE_WARN, "failed to create " "anonymous state"); dtrace_dof_destroy(dof); break; } } rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), &dtrace_anon.dta_enabling, 0, B_TRUE); if (rv == 0) rv = dtrace_dof_options(dof, state); dtrace_err_verbose = 0; dtrace_dof_destroy(dof); if (rv != 0) { /* * This is malformed DOF; chuck any anonymous state * that we created. */ ASSERT(dtrace_anon.dta_enabling == NULL); dtrace_state_destroy(state); dtrace_anon.dta_state = NULL; break; } ASSERT(dtrace_anon.dta_enabling != NULL); } if (dtrace_anon.dta_enabling != NULL) { int rval; /* * dtrace_enabling_retain() can only fail because we are * trying to retain more enablings than are allowed -- but * we only have one anonymous enabling, and we are guaranteed * to be allowed at least one retained enabling; we assert * that dtrace_enabling_retain() returns success. */ rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); ASSERT(rval == 0); dtrace_enabling_dump(dtrace_anon.dta_enabling); } } /* * DTrace Helper Functions */ static void dtrace_helper_trace(dtrace_helper_action_t *helper, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) { uint32_t size, next, nnext, i; dtrace_helptrace_t *ent, *buffer; uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; if ((buffer = dtrace_helptrace_buffer) == NULL) return; ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); /* * What would a tracing framework be without its own tracing * framework? (Well, a hell of a lot simpler, for starters...) */ size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * sizeof (uint64_t) - sizeof (uint64_t); /* * Iterate until we can allocate a slot in the trace buffer. */ do { next = dtrace_helptrace_next; if (next + size < dtrace_helptrace_bufsize) { nnext = next + size; } else { nnext = size; } } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); /* * We have our slot; fill it in. */ if (nnext == size) { dtrace_helptrace_wrapped++; next = 0; } ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next); ent->dtht_helper = helper; ent->dtht_where = where; ent->dtht_nlocals = vstate->dtvs_nlocals; ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
mstate->dtms_fltoffs : -1; ent->dtht_fault = DTRACE_FLAGS2FLT(flags); ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; for (i = 0; i < vstate->dtvs_nlocals; i++) { dtrace_statvar_t *svar; if ((svar = vstate->dtvs_locals[i]) == NULL) continue; ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); ent->dtht_locals[i] = ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; } } static uint64_t dtrace_helper(int which, dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t arg0, uint64_t arg1) { uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; uint64_t sarg0 = mstate->dtms_arg[0]; uint64_t sarg1 = mstate->dtms_arg[1]; uint64_t rval = 0; dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; dtrace_helper_action_t *helper; dtrace_vstate_t *vstate; dtrace_difo_t *pred; int i, trace = dtrace_helptrace_buffer != NULL; ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); if (helpers == NULL) return (0); if ((helper = helpers->dthps_actions[which]) == NULL) return (0); vstate = &helpers->dthps_vstate; mstate->dtms_arg[0] = arg0; mstate->dtms_arg[1] = arg1; /* * Now iterate over each helper. If its predicate evaluates to 'true', * we'll call the corresponding actions. Note that the below calls * to dtrace_dif_emulate() may set faults in machine state. This is * okay: our caller (the outer dtrace_dif_emulate()) will simply plow * the stored DIF offset with its own (which is the desired behavior). * Also, note the calls to dtrace_dif_emulate() may allocate scratch * from machine state; this is okay, too. */ for (; helper != NULL; helper = helper->dtha_next) { if ((pred = helper->dtha_predicate) != NULL) { if (trace) dtrace_helper_trace(helper, mstate, vstate, 0); if (!dtrace_dif_emulate(pred, mstate, vstate, state)) goto next; if (*flags & CPU_DTRACE_FAULT) goto err; } for (i = 0; i < helper->dtha_nactions; i++) { if (trace) dtrace_helper_trace(helper, mstate, vstate, i + 1); rval = dtrace_dif_emulate(helper->dtha_actions[i], mstate, vstate, state); if (*flags & CPU_DTRACE_FAULT) goto err; } next: if (trace) dtrace_helper_trace(helper, mstate, vstate, DTRACE_HELPTRACE_NEXT); } if (trace) dtrace_helper_trace(helper, mstate, vstate, DTRACE_HELPTRACE_DONE); /* * Restore the arg0 that we saved upon entry. */ mstate->dtms_arg[0] = sarg0; mstate->dtms_arg[1] = sarg1; return (rval); err: if (trace) dtrace_helper_trace(helper, mstate, vstate, DTRACE_HELPTRACE_ERR); /* * Restore the arg0 that we saved upon entry. 
*/ mstate->dtms_arg[0] = sarg0; mstate->dtms_arg[1] = sarg1; return (0); } static void dtrace_helper_action_destroy(dtrace_helper_action_t *helper, dtrace_vstate_t *vstate) { int i; if (helper->dtha_predicate != NULL) dtrace_difo_release(helper->dtha_predicate, vstate); for (i = 0; i < helper->dtha_nactions; i++) { ASSERT(helper->dtha_actions[i] != NULL); dtrace_difo_release(helper->dtha_actions[i], vstate); } kmem_free(helper->dtha_actions, helper->dtha_nactions * sizeof (dtrace_difo_t *)); kmem_free(helper, sizeof (dtrace_helper_action_t)); } static int dtrace_helper_destroygen(dtrace_helpers_t *help, int gen) { proc_t *p = curproc; dtrace_vstate_t *vstate; int i; if (help == NULL) help = p->p_dtrace_helpers; ASSERT(MUTEX_HELD(&dtrace_lock)); if (help == NULL || gen > help->dthps_generation) return (EINVAL); vstate = &help->dthps_vstate; for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { dtrace_helper_action_t *last = NULL, *h, *next; for (h = help->dthps_actions[i]; h != NULL; h = next) { next = h->dtha_next; if (h->dtha_generation == gen) { if (last != NULL) { last->dtha_next = next; } else { help->dthps_actions[i] = next; } dtrace_helper_action_destroy(h, vstate); } else { last = h; } } } /* * Iterate until we've cleared out all helper providers with the * given generation number. */ for (;;) { dtrace_helper_provider_t *prov; /* * Look for a helper provider with the right generation. We * have to start back at the beginning of the list each time * because we drop dtrace_lock. It's unlikely that we'll make * more than two passes. */ for (i = 0; i < help->dthps_nprovs; i++) { prov = help->dthps_provs[i]; if (prov->dthp_generation == gen) break; } /* * If there were no matches, we're done. */ if (i == help->dthps_nprovs) break; /* * Move the last helper provider into this slot. */ help->dthps_nprovs--; help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; help->dthps_provs[help->dthps_nprovs] = NULL; mutex_exit(&dtrace_lock); /* * If we have a meta provider, remove this helper provider. */ mutex_enter(&dtrace_meta_lock); if (dtrace_meta_pid != NULL) { ASSERT(dtrace_deferred_pid == NULL); dtrace_helper_provider_remove(&prov->dthp_prov, p->p_pid); } mutex_exit(&dtrace_meta_lock); dtrace_helper_provider_destroy(prov); mutex_enter(&dtrace_lock); } return (0); } static int dtrace_helper_validate(dtrace_helper_action_t *helper) { int err = 0, i; dtrace_difo_t *dp; if ((dp = helper->dtha_predicate) != NULL) err += dtrace_difo_validate_helper(dp); for (i = 0; i < helper->dtha_nactions; i++) err += dtrace_difo_validate_helper(helper->dtha_actions[i]); return (err == 0); } static int dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep, dtrace_helpers_t *help) { dtrace_helper_action_t *helper, *last; dtrace_actdesc_t *act; dtrace_vstate_t *vstate; dtrace_predicate_t *pred; int count = 0, nactions = 0, i; if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) return (EINVAL); last = help->dthps_actions[which]; vstate = &help->dthps_vstate; for (count = 0; last != NULL; last = last->dtha_next) { count++; if (last->dtha_next == NULL) break; } /* * If we already have dtrace_helper_actions_max helper actions for this * helper action type, we'll refuse to add a new one. 
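 *
 * (A hypothetical illustration, added for clarity and not taken from
 * this file: with the cap set to, say, 32, a caller looping on
 *
 *	rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
 *	    ep, help);
 *
 * would see rv == 0 for the first 32 additions of that action type
 * and rv == ENOSPC thereafter.)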
*/ if (count >= dtrace_helper_actions_max) return (ENOSPC); helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); helper->dtha_generation = help->dthps_generation; if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { ASSERT(pred->dtp_difo != NULL); dtrace_difo_hold(pred->dtp_difo); helper->dtha_predicate = pred->dtp_difo; } for (act = ep->dted_action; act != NULL; act = act->dtad_next) { if (act->dtad_kind != DTRACEACT_DIFEXPR) goto err; if (act->dtad_difo == NULL) goto err; nactions++; } helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * (helper->dtha_nactions = nactions), KM_SLEEP); for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { dtrace_difo_hold(act->dtad_difo); helper->dtha_actions[i++] = act->dtad_difo; } if (!dtrace_helper_validate(helper)) goto err; if (last == NULL) { help->dthps_actions[which] = helper; } else { last->dtha_next = helper; } if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { dtrace_helptrace_nlocals = vstate->dtvs_nlocals; dtrace_helptrace_next = 0; } return (0); err: dtrace_helper_action_destroy(helper, vstate); return (EINVAL); } static void dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, dof_helper_t *dofhp) { ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); mutex_enter(&dtrace_meta_lock); mutex_enter(&dtrace_lock); if (!dtrace_attached() || dtrace_meta_pid == NULL) { /* * If the dtrace module is loaded but not attached, or if * there isn't a meta provider registered to deal with * these provider descriptions, we need to postpone creating * the actual providers until later. */ if (help->dthps_next == NULL && help->dthps_prev == NULL && dtrace_deferred_pid != help) { help->dthps_deferred = 1; help->dthps_pid = p->p_pid; help->dthps_next = dtrace_deferred_pid; help->dthps_prev = NULL; if (dtrace_deferred_pid != NULL) dtrace_deferred_pid->dthps_prev = help; dtrace_deferred_pid = help; } mutex_exit(&dtrace_lock); } else if (dofhp != NULL) { /* * If the dtrace module is loaded and we have a particular * helper provider description, pass that off to the * meta provider. */ mutex_exit(&dtrace_lock); dtrace_helper_provide(dofhp, p->p_pid); } else { /* * Otherwise, just pass all the helper provider descriptions * off to the meta provider. */ int i; mutex_exit(&dtrace_lock); for (i = 0; i < help->dthps_nprovs; i++) { dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, p->p_pid); } } mutex_exit(&dtrace_meta_lock); } static int dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen) { dtrace_helper_provider_t *hprov, **tmp_provs; uint_t tmp_maxprovs, i; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(help != NULL); /* * If we already have dtrace_helper_providers_max helper providers, * we'll refuse to add a new one. */ if (help->dthps_nprovs >= dtrace_helper_providers_max) return (ENOSPC); /* * Check to make sure this isn't a duplicate. */ for (i = 0; i < help->dthps_nprovs; i++) { if (dofhp->dofhp_addr == help->dthps_provs[i]->dthp_prov.dofhp_addr) return (EALREADY); } hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); hprov->dthp_prov = *dofhp; hprov->dthp_ref = 1; hprov->dthp_generation = gen; /* * Allocate a bigger table for helper providers if it's already full. 
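 *
 * (Worked example, added for clarity: dthps_maxprovs doubles through
 * 0 -> 2 -> 4 -> 8 -> ..., clamped at dtrace_helper_providers_max;
 * with a cap of 32 the table is therefore grown at most five times,
 * and the ASSERT below checks that each step actually enlarges it.)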
*/ if (help->dthps_maxprovs == help->dthps_nprovs) { tmp_maxprovs = help->dthps_maxprovs; tmp_provs = help->dthps_provs; if (help->dthps_maxprovs == 0) help->dthps_maxprovs = 2; else help->dthps_maxprovs *= 2; if (help->dthps_maxprovs > dtrace_helper_providers_max) help->dthps_maxprovs = dtrace_helper_providers_max; ASSERT(tmp_maxprovs < help->dthps_maxprovs); help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * sizeof (dtrace_helper_provider_t *), KM_SLEEP); if (tmp_provs != NULL) { bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * sizeof (dtrace_helper_provider_t *)); kmem_free(tmp_provs, tmp_maxprovs * sizeof (dtrace_helper_provider_t *)); } } help->dthps_provs[help->dthps_nprovs] = hprov; help->dthps_nprovs++; return (0); } static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) { mutex_enter(&dtrace_lock); if (--hprov->dthp_ref == 0) { dof_hdr_t *dof; mutex_exit(&dtrace_lock); dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; dtrace_dof_destroy(dof); kmem_free(hprov, sizeof (dtrace_helper_provider_t)); } else { mutex_exit(&dtrace_lock); } } static int dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) { uintptr_t daddr = (uintptr_t)dof; dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; dof_provider_t *provider; dof_probe_t *probe; uint8_t *arg; char *strtab, *typestr; dof_stridx_t typeidx; size_t typesz; uint_t nprobes, j, k; ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); if (sec->dofs_offset & (sizeof (uint_t) - 1)) { dtrace_dof_error(dof, "misaligned section offset"); return (-1); } /* * The section needs to be large enough to contain the DOF provider * structure appropriate for the given version. */ if (sec->dofs_size < ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? offsetof(dof_provider_t, dofpv_prenoffs) : sizeof (dof_provider_t))) { dtrace_dof_error(dof, "provider section too small"); return (-1); } provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); if (str_sec == NULL || prb_sec == NULL || arg_sec == NULL || off_sec == NULL) return (-1); enoff_sec = NULL; if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && provider->dofpv_prenoffs != DOF_SECT_NONE && (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, provider->dofpv_prenoffs)) == NULL) return (-1); strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); if (provider->dofpv_name >= str_sec->dofs_size || strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { dtrace_dof_error(dof, "invalid provider name"); return (-1); } if (prb_sec->dofs_entsize == 0 || prb_sec->dofs_entsize > prb_sec->dofs_size) { dtrace_dof_error(dof, "invalid entry size"); return (-1); } if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { dtrace_dof_error(dof, "misaligned entry size"); return (-1); } if (off_sec->dofs_entsize != sizeof (uint32_t)) { dtrace_dof_error(dof, "invalid entry size"); return (-1); } if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { dtrace_dof_error(dof, "misaligned section offset"); return (-1); } if (arg_sec->dofs_entsize != sizeof (uint8_t)) { dtrace_dof_error(dof, "invalid entry size"); return (-1); } arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; /* * Take a pass through the 
probes to check for errors. */ for (j = 0; j < nprobes; j++) { probe = (dof_probe_t *)(uintptr_t)(daddr + prb_sec->dofs_offset + j * prb_sec->dofs_entsize); if (probe->dofpr_func >= str_sec->dofs_size) { dtrace_dof_error(dof, "invalid function name"); return (-1); } if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { dtrace_dof_error(dof, "function name too long"); /* * Keep going if the function name is too long. * Unlike provider and probe names, we cannot reasonably * impose restrictions on function names, since they're * a property of the code being instrumented. We will * skip this probe in dtrace_helper_provide_one(). */ } if (probe->dofpr_name >= str_sec->dofs_size || strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { dtrace_dof_error(dof, "invalid probe name"); return (-1); } /* * The offset count must not wrap the index, and the offsets * must also not overflow the section's data. */ if (probe->dofpr_offidx + probe->dofpr_noffs < probe->dofpr_offidx || (probe->dofpr_offidx + probe->dofpr_noffs) * off_sec->dofs_entsize > off_sec->dofs_size) { dtrace_dof_error(dof, "invalid probe offset"); return (-1); } if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { /* * If there's no is-enabled offset section, make sure * there aren't any is-enabled offsets. Otherwise * perform the same checks as for probe offsets * (immediately above). */ if (enoff_sec == NULL) { if (probe->dofpr_enoffidx != 0 || probe->dofpr_nenoffs != 0) { dtrace_dof_error(dof, "is-enabled " "offsets with null section"); return (-1); } } else if (probe->dofpr_enoffidx + probe->dofpr_nenoffs < probe->dofpr_enoffidx || (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * enoff_sec->dofs_entsize > enoff_sec->dofs_size) { dtrace_dof_error(dof, "invalid is-enabled " "offset"); return (-1); } if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { dtrace_dof_error(dof, "zero probe and " "is-enabled offsets"); return (-1); } } else if (probe->dofpr_noffs == 0) { dtrace_dof_error(dof, "zero probe offsets"); return (-1); } if (probe->dofpr_argidx + probe->dofpr_xargc < probe->dofpr_argidx || (probe->dofpr_argidx + probe->dofpr_xargc) * arg_sec->dofs_entsize > arg_sec->dofs_size) { dtrace_dof_error(dof, "invalid args"); return (-1); } typeidx = probe->dofpr_nargv; typestr = strtab + probe->dofpr_nargv; for (k = 0; k < probe->dofpr_nargc; k++) { if (typeidx >= str_sec->dofs_size) { dtrace_dof_error(dof, "bad " "native argument type"); return (-1); } typesz = strlen(typestr) + 1; if (typesz > DTRACE_ARGTYPELEN) { dtrace_dof_error(dof, "native " "argument type too long"); return (-1); } typeidx += typesz; typestr += typesz; } typeidx = probe->dofpr_xargv; typestr = strtab + probe->dofpr_xargv; for (k = 0; k < probe->dofpr_xargc; k++) { if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { dtrace_dof_error(dof, "bad " "native argument index"); return (-1); } if (typeidx >= str_sec->dofs_size) { dtrace_dof_error(dof, "bad " "translated argument type"); return (-1); } typesz = strlen(typestr) + 1; if (typesz > DTRACE_ARGTYPELEN) { dtrace_dof_error(dof, "translated argument " "type too long"); return (-1); } typeidx += typesz; typestr += typesz; } } return (0); } static int -#ifdef __FreeBSD__ dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp, struct proc *p) -#else -dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) -#endif { dtrace_helpers_t *help; dtrace_vstate_t *vstate; dtrace_enabling_t *enab = NULL; -#ifndef __FreeBSD__ - proc_t *p = curproc; -#endif int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 
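	/*
	 * (Summary added for clarity, inferred from the code below: this
	 * function consumes the DOF.  On every failure path it is destroyed
	 * here; on success with helper providers it is stashed in dofhp_dof
	 * and destroyed later by dtrace_helper_provider_destroy() when the
	 * last reference is dropped.)
	 */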
uintptr_t daddr = (uintptr_t)dof; ASSERT(MUTEX_HELD(&dtrace_lock)); if ((help = p->p_dtrace_helpers) == NULL) help = dtrace_helpers_create(p); vstate = &help->dthps_vstate; - if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, - dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { + if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, dhp->dofhp_addr, + B_FALSE)) != 0) { dtrace_dof_destroy(dof); return (rv); } /* * Look for helper providers and validate their descriptions. */ - if (dhp != NULL) { - for (i = 0; i < dof->dofh_secnum; i++) { - dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + - dof->dofh_secoff + i * dof->dofh_secsize); + for (i = 0; i < dof->dofh_secnum; i++) { + dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + + dof->dofh_secoff + i * dof->dofh_secsize); - if (sec->dofs_type != DOF_SECT_PROVIDER) - continue; + if (sec->dofs_type != DOF_SECT_PROVIDER) + continue; - if (dtrace_helper_provider_validate(dof, sec) != 0) { - dtrace_enabling_destroy(enab); - dtrace_dof_destroy(dof); - return (-1); - } - - nprovs++; + if (dtrace_helper_provider_validate(dof, sec) != 0) { + dtrace_enabling_destroy(enab); + dtrace_dof_destroy(dof); + return (-1); } + + nprovs++; } /* * Now we need to walk through the ECB descriptions in the enabling. */ for (i = 0; i < enab->dten_ndesc; i++) { dtrace_ecbdesc_t *ep = enab->dten_desc[i]; dtrace_probedesc_t *desc = &ep->dted_probe; if (strcmp(desc->dtpd_provider, "dtrace") != 0) continue; if (strcmp(desc->dtpd_mod, "helper") != 0) continue; if (strcmp(desc->dtpd_func, "ustack") != 0) continue; if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, ep, help)) != 0) { /* * Adding this helper action failed -- we are now going * to rip out the entire generation and return failure. */ (void) dtrace_helper_destroygen(help, help->dthps_generation); dtrace_enabling_destroy(enab); dtrace_dof_destroy(dof); return (-1); } nhelpers++; } if (nhelpers < enab->dten_ndesc) dtrace_dof_error(dof, "unmatched helpers"); gen = help->dthps_generation++; dtrace_enabling_destroy(enab); - if (dhp != NULL && nprovs > 0) { + if (nprovs > 0) { /* * Now that this is in-kernel, we change the sense of the * members: dofhp_dof denotes the in-kernel copy of the DOF * and dofhp_addr denotes the address at user-level. */ dhp->dofhp_addr = dhp->dofhp_dof; dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; if (dtrace_helper_provider_add(dhp, help, gen) == 0) { mutex_exit(&dtrace_lock); dtrace_helper_provider_register(p, help, dhp); mutex_enter(&dtrace_lock); destroy = 0; } } if (destroy) dtrace_dof_destroy(dof); return (gen); } static dtrace_helpers_t * dtrace_helpers_create(proc_t *p) { dtrace_helpers_t *help; ASSERT(MUTEX_HELD(&dtrace_lock)); ASSERT(p->p_dtrace_helpers == NULL); help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS, KM_SLEEP); p->p_dtrace_helpers = help; dtrace_helpers++; return (help); } #ifdef illumos static #endif void dtrace_helpers_destroy(proc_t *p) { dtrace_helpers_t *help; dtrace_vstate_t *vstate; #ifdef illumos proc_t *p = curproc; #endif int i; mutex_enter(&dtrace_lock); ASSERT(p->p_dtrace_helpers != NULL); ASSERT(dtrace_helpers > 0); help = p->p_dtrace_helpers; vstate = &help->dthps_vstate; /* * We're now going to lose the help from this process. */ p->p_dtrace_helpers = NULL; dtrace_sync(); /* * Destroy the helper actions. 
*/ for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { dtrace_helper_action_t *h, *next; for (h = help->dthps_actions[i]; h != NULL; h = next) { next = h->dtha_next; dtrace_helper_action_destroy(h, vstate); h = next; } } mutex_exit(&dtrace_lock); /* * Destroy the helper providers. */ if (help->dthps_maxprovs > 0) { mutex_enter(&dtrace_meta_lock); if (dtrace_meta_pid != NULL) { ASSERT(dtrace_deferred_pid == NULL); for (i = 0; i < help->dthps_nprovs; i++) { dtrace_helper_provider_remove( &help->dthps_provs[i]->dthp_prov, p->p_pid); } } else { mutex_enter(&dtrace_lock); ASSERT(help->dthps_deferred == 0 || help->dthps_next != NULL || help->dthps_prev != NULL || help == dtrace_deferred_pid); /* * Remove the helper from the deferred list. */ if (help->dthps_next != NULL) help->dthps_next->dthps_prev = help->dthps_prev; if (help->dthps_prev != NULL) help->dthps_prev->dthps_next = help->dthps_next; if (dtrace_deferred_pid == help) { dtrace_deferred_pid = help->dthps_next; ASSERT(help->dthps_prev == NULL); } mutex_exit(&dtrace_lock); } mutex_exit(&dtrace_meta_lock); for (i = 0; i < help->dthps_nprovs; i++) { dtrace_helper_provider_destroy(help->dthps_provs[i]); } kmem_free(help->dthps_provs, help->dthps_maxprovs * sizeof (dtrace_helper_provider_t *)); } mutex_enter(&dtrace_lock); dtrace_vstate_fini(&help->dthps_vstate); kmem_free(help->dthps_actions, sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); kmem_free(help, sizeof (dtrace_helpers_t)); --dtrace_helpers; mutex_exit(&dtrace_lock); } #ifdef illumos static #endif void dtrace_helpers_duplicate(proc_t *from, proc_t *to) { dtrace_helpers_t *help, *newhelp; dtrace_helper_action_t *helper, *new, *last; dtrace_difo_t *dp; dtrace_vstate_t *vstate; int i, j, sz, hasprovs = 0; mutex_enter(&dtrace_lock); ASSERT(from->p_dtrace_helpers != NULL); ASSERT(dtrace_helpers > 0); help = from->p_dtrace_helpers; newhelp = dtrace_helpers_create(to); ASSERT(to->p_dtrace_helpers != NULL); newhelp->dthps_generation = help->dthps_generation; vstate = &newhelp->dthps_vstate; /* * Duplicate the helper actions. */ for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { if ((helper = help->dthps_actions[i]) == NULL) continue; for (last = NULL; helper != NULL; helper = helper->dtha_next) { new = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); new->dtha_generation = helper->dtha_generation; if ((dp = helper->dtha_predicate) != NULL) { dp = dtrace_difo_duplicate(dp, vstate); new->dtha_predicate = dp; } new->dtha_nactions = helper->dtha_nactions; sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; new->dtha_actions = kmem_alloc(sz, KM_SLEEP); for (j = 0; j < new->dtha_nactions; j++) { dtrace_difo_t *dp = helper->dtha_actions[j]; ASSERT(dp != NULL); dp = dtrace_difo_duplicate(dp, vstate); new->dtha_actions[j] = dp; } if (last != NULL) { last->dtha_next = new; } else { newhelp->dthps_actions[i] = new; } last = new; } } /* * Duplicate the helper providers and register them with the * DTrace framework. 
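 *
 * (Added note: the duplicate shares the providers rather than
 * deep-copying them -- each dthp_ref is bumped below, and the
 * reference is dropped again in dtrace_helper_provider_destroy()
 * when either copy goes away.)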
*/ if (help->dthps_nprovs > 0) { newhelp->dthps_nprovs = help->dthps_nprovs; newhelp->dthps_maxprovs = help->dthps_nprovs; newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * sizeof (dtrace_helper_provider_t *), KM_SLEEP); for (i = 0; i < newhelp->dthps_nprovs; i++) { newhelp->dthps_provs[i] = help->dthps_provs[i]; newhelp->dthps_provs[i]->dthp_ref++; } hasprovs = 1; } mutex_exit(&dtrace_lock); if (hasprovs) dtrace_helper_provider_register(to, newhelp, NULL); } /* * DTrace Hook Functions */ static void dtrace_module_loaded(modctl_t *ctl) { dtrace_provider_t *prv; mutex_enter(&dtrace_provider_lock); #ifdef illumos mutex_enter(&mod_lock); #endif #ifdef illumos ASSERT(ctl->mod_busy); #endif /* * We're going to call each provider's per-module provide operation * specifying only this module. */ for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); /* * If we have any retained enablings, we need to match against them. * Enabling probes requires that cpu_lock be held, and we cannot hold * cpu_lock here -- it is legal for cpu_lock to be held when loading a * module. (In particular, this happens when loading scheduling * classes.) So if we have any retained enablings, we need to dispatch * our task queue to do the match for us. */ mutex_enter(&dtrace_lock); if (dtrace_retained == NULL) { mutex_exit(&dtrace_lock); return; } (void) taskq_dispatch(dtrace_taskq, (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); mutex_exit(&dtrace_lock); /* * And now, for a little heuristic sleaze: in general, we want to * match modules as soon as they load. However, we cannot guarantee * this, because it would lead us to the lock ordering violation * outlined above. The common case, of course, is that cpu_lock is * _not_ held -- so we delay here for a clock tick, hoping that that's * long enough for the task queue to do its work. If it's not, it's * not a serious problem -- it just means that the module that we * just loaded may not be immediately instrumentable. */ delay(1); } static void #ifdef illumos dtrace_module_unloaded(modctl_t *ctl) #else dtrace_module_unloaded(modctl_t *ctl, int *error) #endif { dtrace_probe_t template, *probe, *first, *next; dtrace_provider_t *prov; #ifndef illumos char modname[DTRACE_MODNAMELEN]; size_t len; #endif #ifdef illumos template.dtpr_mod = ctl->mod_modname; #else /* Handle the fact that ctl->filename may end in ".ko". */ strlcpy(modname, ctl->filename, sizeof(modname)); len = strlen(ctl->filename); if (len > 3 && strcmp(modname + len - 3, ".ko") == 0) modname[len - 3] = '\0'; template.dtpr_mod = modname; #endif mutex_enter(&dtrace_provider_lock); #ifdef illumos mutex_enter(&mod_lock); #endif mutex_enter(&dtrace_lock); #ifndef illumos if (ctl->nenabled > 0) { /* Don't allow unloads if a probe is enabled. */ mutex_exit(&dtrace_provider_lock); mutex_exit(&dtrace_lock); *error = -1; printf( "kldunload: attempt to unload module that has DTrace probes enabled\n"); return; } #endif if (dtrace_bymod == NULL) { /* * The DTrace module is loaded (obviously) but not attached; * we don't have any work to do. 
*/ mutex_exit(&dtrace_provider_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_lock); return; } for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); probe != NULL; probe = probe->dtpr_nextmod) { if (probe->dtpr_ecb != NULL) { mutex_exit(&dtrace_provider_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_lock); /* * This shouldn't _actually_ be possible -- we're * unloading a module that has an enabled probe in it. * (It's normally up to the provider to make sure that * this can't happen.) However, because dtps_enable() * doesn't have a failure mode, there can be an * enable/unload race. Upshot: we don't want to * assert, but we're not going to disable the * probe, either. */ if (dtrace_err_verbose) { #ifdef illumos cmn_err(CE_WARN, "unloaded module '%s' had " "enabled probes", ctl->mod_modname); #else cmn_err(CE_WARN, "unloaded module '%s' had " "enabled probes", modname); #endif } return; } } probe = first; for (first = NULL; probe != NULL; probe = next) { ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); dtrace_probes[probe->dtpr_id - 1] = NULL; next = probe->dtpr_nextmod; dtrace_hash_remove(dtrace_bymod, probe); dtrace_hash_remove(dtrace_byfunc, probe); dtrace_hash_remove(dtrace_byname, probe); if (first == NULL) { first = probe; probe->dtpr_nextmod = NULL; } else { probe->dtpr_nextmod = first; first = probe; } } /* * We've removed all of the module's probes from the hash chains and * from the probe array. Now issue a dtrace_sync() to be sure that * everyone has cleared out from any probe array processing. */ dtrace_sync(); for (probe = first; probe != NULL; probe = first) { first = probe->dtpr_nextmod; prov = probe->dtpr_provider; prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, probe->dtpr_arg); kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); #ifdef illumos vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); #else free_unr(dtrace_arena, probe->dtpr_id); #endif kmem_free(probe, sizeof (dtrace_probe_t)); } mutex_exit(&dtrace_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); } #ifndef illumos static void dtrace_kld_load(void *arg __unused, linker_file_t lf) { dtrace_module_loaded(lf); } static void dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) { if (*error != 0) /* We already have an error, so don't do anything. */ return; dtrace_module_unloaded(lf, error); } #endif #ifdef illumos static void dtrace_suspend(void) { dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); } static void dtrace_resume(void) { dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); } #endif static int dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) { ASSERT(MUTEX_HELD(&cpu_lock)); mutex_enter(&dtrace_lock); switch (what) { case CPU_CONFIG: { dtrace_state_t *state; dtrace_optval_t *opt, rs, c; /* * For now, we only allocate a new buffer for anonymous state. */ if ((state = dtrace_anon.dta_state) == NULL) break; if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) break; opt = state->dts_options; c = opt[DTRACEOPT_CPU]; if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) break; /* * Regardless of what the actual policy is, we're going to * temporarily set our resize policy to be manual. We're * also going to temporarily set our CPU option to denote * the newly configured CPU. 
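 *
 * (Added rationale, inferred from the resize policy described earlier
 * in this file: forcing the policy to "manual" makes the allocation in
 * dtrace_state_buffers() a single all-or-nothing attempt at the
 * configured size rather than the automatic policy's halving retries,
 * which is what we want for one late-arriving CPU.)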
*/ rs = opt[DTRACEOPT_BUFRESIZE]; opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; (void) dtrace_state_buffers(state); opt[DTRACEOPT_BUFRESIZE] = rs; opt[DTRACEOPT_CPU] = c; break; } case CPU_UNCONFIG: /* * We don't free the buffer in the CPU_UNCONFIG case. (The * buffer will be freed when the consumer exits.) */ break; default: break; } mutex_exit(&dtrace_lock); return (0); } #ifdef illumos static void dtrace_cpu_setup_initial(processorid_t cpu) { (void) dtrace_cpu_setup(CPU_CONFIG, cpu); } #endif static void dtrace_toxrange_add(uintptr_t base, uintptr_t limit) { if (dtrace_toxranges >= dtrace_toxranges_max) { int osize, nsize; dtrace_toxrange_t *range; osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); if (osize == 0) { ASSERT(dtrace_toxrange == NULL); ASSERT(dtrace_toxranges_max == 0); dtrace_toxranges_max = 1; } else { dtrace_toxranges_max <<= 1; } nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); range = kmem_zalloc(nsize, KM_SLEEP); if (dtrace_toxrange != NULL) { ASSERT(osize != 0); bcopy(dtrace_toxrange, range, osize); kmem_free(dtrace_toxrange, osize); } dtrace_toxrange = range; } ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); dtrace_toxrange[dtrace_toxranges].dtt_base = base; dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; dtrace_toxranges++; } static void dtrace_getf_barrier() { #ifdef illumos /* * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings * that contain calls to getf(), this routine will be called on every * closef() before either the underlying vnode is released or the * file_t itself is freed. By the time we are here, it is essential * that the file_t can no longer be accessed from a call to getf() * in probe context -- that assures that a dtrace_sync() can be used * to clear out any enablings referring to the old structures. 
*/ if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 || kcred->cr_zone->zone_dtrace_getf != 0) dtrace_sync(); #endif } /* * DTrace Driver Cookbook Functions */ #ifdef illumos /*ARGSUSED*/ static int dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) { dtrace_provider_id_t id; dtrace_state_t *state = NULL; dtrace_enabling_t *enab; mutex_enter(&cpu_lock); mutex_enter(&dtrace_provider_lock); mutex_enter(&dtrace_lock); if (ddi_soft_state_init(&dtrace_softstate, sizeof (dtrace_state_t), 0) != 0) { cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); mutex_exit(&cpu_lock); mutex_exit(&dtrace_provider_lock); mutex_exit(&dtrace_lock); return (DDI_FAILURE); } if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); ddi_remove_minor_node(devi, NULL); ddi_soft_state_fini(&dtrace_softstate); mutex_exit(&cpu_lock); mutex_exit(&dtrace_provider_lock); mutex_exit(&dtrace_lock); return (DDI_FAILURE); } ddi_report_dev(devi); dtrace_devi = devi; dtrace_modload = dtrace_module_loaded; dtrace_modunload = dtrace_module_unloaded; dtrace_cpu_init = dtrace_cpu_setup_initial; dtrace_helpers_cleanup = dtrace_helpers_destroy; dtrace_helpers_fork = dtrace_helpers_duplicate; dtrace_cpustart_init = dtrace_suspend; dtrace_cpustart_fini = dtrace_resume; dtrace_debugger_init = dtrace_suspend; dtrace_debugger_fini = dtrace_resume; register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); ASSERT(MUTEX_HELD(&cpu_lock)); dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 1, INT_MAX, 0); dtrace_state_cache = kmem_cache_create("dtrace_state_cache", sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, NULL, NULL, NULL, NULL, NULL, 0); ASSERT(MUTEX_HELD(&cpu_lock)); dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), offsetof(dtrace_probe_t, dtpr_nextmod), offsetof(dtrace_probe_t, dtpr_prevmod)); dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), offsetof(dtrace_probe_t, dtpr_nextfunc), offsetof(dtrace_probe_t, dtpr_prevfunc)); dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), offsetof(dtrace_probe_t, dtpr_nextname), offsetof(dtrace_probe_t, dtpr_prevname)); if (dtrace_retain_max < 1) { cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " "setting to 1", dtrace_retain_max); dtrace_retain_max = 1; } /* * Now discover our toxic ranges. */ dtrace_toxic_ranges(dtrace_toxrange_add); /* * Before we register ourselves as a provider to our own framework, * we would like to assert that dtrace_provider is NULL -- but that's * not true if we were loaded as a dependency of a DTrace provider. * Once we've registered, we can assert that dtrace_provider is our * pseudo provider. 
*/ (void) dtrace_register("dtrace", &dtrace_provider_attr, DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); ASSERT(dtrace_provider != NULL); ASSERT((dtrace_provider_id_t)dtrace_provider == id); dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) dtrace_provider, NULL, NULL, "END", 0, NULL); dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) dtrace_provider, NULL, NULL, "ERROR", 1, NULL); dtrace_anon_property(); mutex_exit(&cpu_lock); /* * If there are already providers, we must ask them to provide their * probes, and then match any anonymous enabling against them. Note * that there should be no other retained enablings at this time: * the only retained enablings at this time should be the anonymous * enabling. */ if (dtrace_anon.dta_enabling != NULL) { ASSERT(dtrace_retained == dtrace_anon.dta_enabling); dtrace_enabling_provide(NULL); state = dtrace_anon.dta_state; /* * We couldn't hold cpu_lock across the above call to * dtrace_enabling_provide(), but we must hold it to actually * enable the probes. We have to drop all of our locks, pick * up cpu_lock, and regain our locks before matching the * retained anonymous enabling. */ mutex_exit(&dtrace_lock); mutex_exit(&dtrace_provider_lock); mutex_enter(&cpu_lock); mutex_enter(&dtrace_provider_lock); mutex_enter(&dtrace_lock); if ((enab = dtrace_anon.dta_enabling) != NULL) (void) dtrace_enabling_match(enab, NULL); mutex_exit(&cpu_lock); } mutex_exit(&dtrace_lock); mutex_exit(&dtrace_provider_lock); if (state != NULL) { /* * If we created any anonymous state, set it going now. */ (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); } return (DDI_SUCCESS); } #endif /* illumos */ #ifndef illumos static void dtrace_dtr(void *); #endif /*ARGSUSED*/ static int #ifdef illumos dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) #else dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) #endif { dtrace_state_t *state; uint32_t priv; uid_t uid; zoneid_t zoneid; #ifdef illumos if (getminor(*devp) == DTRACEMNRN_HELPER) return (0); /* * If this wasn't an open with the "helper" minor, then it must be * the "dtrace" minor. */ if (getminor(*devp) == DTRACEMNRN_DTRACE) return (ENXIO); #else cred_t *cred_p = NULL; cred_p = dev->si_cred; /* * If no DTRACE_PRIV_* bits are set in the credential, then the * caller lacks sufficient permission to do anything with DTrace. */ dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); if (priv == DTRACE_PRIV_NONE) { #endif return (EACCES); } /* * Ask all providers to provide all their probes. */ mutex_enter(&dtrace_provider_lock); dtrace_probe_provide(NULL, NULL); mutex_exit(&dtrace_provider_lock); mutex_enter(&cpu_lock); mutex_enter(&dtrace_lock); dtrace_opens++; dtrace_membar_producer(); #ifdef illumos /* * If the kernel debugger is active (that is, if the kernel debugger * modified text in some way), we won't allow the open. */ if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { dtrace_opens--; mutex_exit(&cpu_lock); mutex_exit(&dtrace_lock); return (EBUSY); } if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) { /* * If DTrace helper tracing is enabled, we need to allocate the * trace buffer and initialize the values. 
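 *
 * (Added note: dtrace_helptrace_enable acts as a one-shot request --
 * it is cleared below once the buffer exists -- and the buffer is
 * torn down only on the close path under dtrace_helptrace_disable,
 * which relies on a dtrace_sync() to know no CPU is still tracing
 * into it.)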
*/ dtrace_helptrace_buffer = kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); dtrace_helptrace_next = 0; dtrace_helptrace_wrapped = 0; dtrace_helptrace_enable = 0; } state = dtrace_state_create(devp, cred_p); #else state = dtrace_state_create(dev, NULL); devfs_set_cdevpriv(state, dtrace_dtr); #endif mutex_exit(&cpu_lock); if (state == NULL) { #ifdef illumos if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); #else --dtrace_opens; #endif mutex_exit(&dtrace_lock); return (EAGAIN); } mutex_exit(&dtrace_lock); return (0); } /*ARGSUSED*/ #ifdef illumos static int dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) #else static void dtrace_dtr(void *data) #endif { #ifdef illumos minor_t minor = getminor(dev); dtrace_state_t *state; #endif dtrace_helptrace_t *buf = NULL; #ifdef illumos if (minor == DTRACEMNRN_HELPER) return (0); state = ddi_get_soft_state(dtrace_softstate, minor); #else dtrace_state_t *state = data; #endif mutex_enter(&cpu_lock); mutex_enter(&dtrace_lock); #ifdef illumos if (state->dts_anon) #else if (state != NULL && state->dts_anon) #endif { /* * There is anonymous state. Destroy that first. */ ASSERT(dtrace_anon.dta_state == NULL); dtrace_state_destroy(state->dts_anon); } if (dtrace_helptrace_disable) { /* * If we have been told to disable helper tracing, set the * buffer to NULL before calling into dtrace_state_destroy(); * we take advantage of its dtrace_sync() to know that no * CPU is in probe context with enabled helper tracing * after it returns. */ buf = dtrace_helptrace_buffer; dtrace_helptrace_buffer = NULL; } #ifdef illumos dtrace_state_destroy(state); #else if (state != NULL) { dtrace_state_destroy(state); kmem_free(state, 0); } #endif ASSERT(dtrace_opens > 0); #ifdef illumos /* * Only relinquish control of the kernel debugger interface when there * are no consumers and no anonymous enablings. */ if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); #else --dtrace_opens; #endif if (buf != NULL) { kmem_free(buf, dtrace_helptrace_bufsize); dtrace_helptrace_disable = 0; } mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); #ifdef illumos return (0); #endif } #ifdef illumos /*ARGSUSED*/ static int dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) { int rval; dof_helper_t help, *dhp = NULL; switch (cmd) { case DTRACEHIOC_ADDDOF: if (copyin((void *)arg, &help, sizeof (help)) != 0) { dtrace_dof_error(NULL, "failed to copyin DOF helper"); return (EFAULT); } dhp = &help; arg = (intptr_t)help.dofhp_dof; /*FALLTHROUGH*/ case DTRACEHIOC_ADD: { dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); if (dof == NULL) return (rval); mutex_enter(&dtrace_lock); /* * dtrace_helper_slurp() takes responsibility for the dof -- * it may free it now or it may save it and free it later. 
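 *
 * (Added note: by the convention visible below, any return other than
 * -1 is a generation number; it is handed back to userland via *rv,
 * and DTRACEHIOC_REMOVE later passes the same value down to
 * dtrace_helper_destroygen().)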
*/ if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { *rv = rval; rval = 0; } else { rval = EINVAL; } mutex_exit(&dtrace_lock); return (rval); } case DTRACEHIOC_REMOVE: { mutex_enter(&dtrace_lock); rval = dtrace_helper_destroygen(NULL, arg); mutex_exit(&dtrace_lock); return (rval); } default: break; } return (ENOTTY); } /*ARGSUSED*/ static int dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) { minor_t minor = getminor(dev); dtrace_state_t *state; int rval; if (minor == DTRACEMNRN_HELPER) return (dtrace_ioctl_helper(cmd, arg, rv)); state = ddi_get_soft_state(dtrace_softstate, minor); if (state->dts_anon) { ASSERT(dtrace_anon.dta_state == NULL); state = state->dts_anon; } switch (cmd) { case DTRACEIOC_PROVIDER: { dtrace_providerdesc_t pvd; dtrace_provider_t *pvp; if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) return (EFAULT); pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; mutex_enter(&dtrace_provider_lock); for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) break; } mutex_exit(&dtrace_provider_lock); if (pvp == NULL) return (ESRCH); bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) return (EFAULT); return (0); } case DTRACEIOC_EPROBE: { dtrace_eprobedesc_t epdesc; dtrace_ecb_t *ecb; dtrace_action_t *act; void *buf; size_t size; uintptr_t dest; int nrecs; if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) return (EFAULT); mutex_enter(&dtrace_lock); if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { mutex_exit(&dtrace_lock); return (EINVAL); } if (ecb->dte_probe == NULL) { mutex_exit(&dtrace_lock); return (EINVAL); } epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; epdesc.dtepd_uarg = ecb->dte_uarg; epdesc.dtepd_size = ecb->dte_size; nrecs = epdesc.dtepd_nrecs; epdesc.dtepd_nrecs = 0; for (act = ecb->dte_action; act != NULL; act = act->dta_next) { if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) continue; epdesc.dtepd_nrecs++; } /* * Now that we have the size, we need to allocate a temporary * buffer in which to store the complete description. We need * the temporary buffer to be able to drop dtrace_lock() * across the copyout(), below. 
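 *
 * (Added rationale: copyout() touches pageable user memory and may
 * fault and sleep; doing that with dtrace_lock held would stall every
 * other DTrace operation for an unbounded time, so we snapshot under
 * the lock and copy out after dropping it.)
 */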
*/ size = sizeof (dtrace_eprobedesc_t) + (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); buf = kmem_alloc(size, KM_SLEEP); dest = (uintptr_t)buf; bcopy(&epdesc, (void *)dest, sizeof (epdesc)); dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); for (act = ecb->dte_action; act != NULL; act = act->dta_next) { if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) continue; if (nrecs-- == 0) break; bcopy(&act->dta_rec, (void *)dest, sizeof (dtrace_recdesc_t)); dest += sizeof (dtrace_recdesc_t); } mutex_exit(&dtrace_lock); if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { kmem_free(buf, size); return (EFAULT); } kmem_free(buf, size); return (0); } case DTRACEIOC_AGGDESC: { dtrace_aggdesc_t aggdesc; dtrace_action_t *act; dtrace_aggregation_t *agg; int nrecs; uint32_t offs; dtrace_recdesc_t *lrec; void *buf; size_t size; uintptr_t dest; if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) return (EFAULT); mutex_enter(&dtrace_lock); if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { mutex_exit(&dtrace_lock); return (EINVAL); } aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; nrecs = aggdesc.dtagd_nrecs; aggdesc.dtagd_nrecs = 0; offs = agg->dtag_base; lrec = &agg->dtag_action.dta_rec; aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; for (act = agg->dtag_first; ; act = act->dta_next) { ASSERT(act->dta_intuple || DTRACEACT_ISAGG(act->dta_kind)); /* * If this action has a record size of zero, it * denotes an argument to the aggregating action. * Because the presence of this record doesn't (or * shouldn't) affect the way the data is interpreted, * we don't copy it out to save user-level the * confusion of dealing with a zero-length record. */ if (act->dta_rec.dtrd_size == 0) { ASSERT(agg->dtag_hasarg); continue; } aggdesc.dtagd_nrecs++; if (act == &agg->dtag_action) break; } /* * Now that we have the size, we need to allocate a temporary * buffer in which to store the complete description. We need * the temporary buffer to be able to drop dtrace_lock() * across the copyout(), below. */ size = sizeof (dtrace_aggdesc_t) + (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); buf = kmem_alloc(size, KM_SLEEP); dest = (uintptr_t)buf; bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); for (act = agg->dtag_first; ; act = act->dta_next) { dtrace_recdesc_t rec = act->dta_rec; /* * See the comment in the above loop for why we pass * over zero-length records. */ if (rec.dtrd_size == 0) { ASSERT(agg->dtag_hasarg); continue; } if (nrecs-- == 0) break; rec.dtrd_offset -= offs; bcopy(&rec, (void *)dest, sizeof (rec)); dest += sizeof (dtrace_recdesc_t); if (act == &agg->dtag_action) break; } mutex_exit(&dtrace_lock); if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { kmem_free(buf, size); return (EFAULT); } kmem_free(buf, size); return (0); } case DTRACEIOC_ENABLE: { dof_hdr_t *dof; dtrace_enabling_t *enab = NULL; dtrace_vstate_t *vstate; int err = 0; *rv = 0; /* * If a NULL argument has been passed, we take this as our * cue to reevaluate our enablings. 
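 *
 * (Hypothetical consumer-side illustration, not from this file:
 *
 *	(void) ioctl(dt_fd, DTRACEIOC_ENABLE, NULL);
 *
 * performs no DOF processing at all and simply forces
 * dtrace_enabling_matchall().)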
*/ if (arg == NULL) { dtrace_enabling_matchall(); return (0); } if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) return (rval); mutex_enter(&cpu_lock); mutex_enter(&dtrace_lock); vstate = &state->dts_vstate; if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); dtrace_dof_destroy(dof); return (EBUSY); } if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); dtrace_dof_destroy(dof); return (EINVAL); } if ((rval = dtrace_dof_options(dof, state)) != 0) { dtrace_enabling_destroy(enab); mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); dtrace_dof_destroy(dof); return (rval); } if ((err = dtrace_enabling_match(enab, rv)) == 0) { err = dtrace_enabling_retain(enab); } else { dtrace_enabling_destroy(enab); } mutex_exit(&cpu_lock); mutex_exit(&dtrace_lock); dtrace_dof_destroy(dof); return (err); } case DTRACEIOC_REPLICATE: { dtrace_repldesc_t desc; dtrace_probedesc_t *match = &desc.dtrpd_match; dtrace_probedesc_t *create = &desc.dtrpd_create; int err; if (copyin((void *)arg, &desc, sizeof (desc)) != 0) return (EFAULT); match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; mutex_enter(&dtrace_lock); err = dtrace_enabling_replicate(state, match, create); mutex_exit(&dtrace_lock); return (err); } case DTRACEIOC_PROBEMATCH: case DTRACEIOC_PROBES: { dtrace_probe_t *probe = NULL; dtrace_probedesc_t desc; dtrace_probekey_t pkey; dtrace_id_t i; int m = 0; uint32_t priv; uid_t uid; zoneid_t zoneid; if (copyin((void *)arg, &desc, sizeof (desc)) != 0) return (EFAULT); desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; /* * Before we attempt to match this probe, we want to give * all providers the opportunity to provide it. 
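 *
 * (Added note: DTRACE_IDNONE is 0, so the desc.dtpd_id++ below makes
 * the search start at probe ID 1, the lowest valid ID; userland can
 * then iterate all probes by passing back one past the last ID it
 * saw.)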
*/ if (desc.dtpd_id == DTRACE_IDNONE) { mutex_enter(&dtrace_provider_lock); dtrace_probe_provide(&desc, NULL); mutex_exit(&dtrace_provider_lock); desc.dtpd_id++; } if (cmd == DTRACEIOC_PROBEMATCH) { dtrace_probekey(&desc, &pkey); pkey.dtpk_id = DTRACE_IDNONE; } dtrace_cred2priv(cr, &priv, &uid, &zoneid); mutex_enter(&dtrace_lock); if (cmd == DTRACEIOC_PROBEMATCH) { for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { if ((probe = dtrace_probes[i - 1]) != NULL && (m = dtrace_match_probe(probe, &pkey, priv, uid, zoneid)) != 0) break; } if (m < 0) { mutex_exit(&dtrace_lock); return (EINVAL); } } else { for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { if ((probe = dtrace_probes[i - 1]) != NULL && dtrace_match_priv(probe, priv, uid, zoneid)) break; } } if (probe == NULL) { mutex_exit(&dtrace_lock); return (ESRCH); } dtrace_probe_description(probe, &desc); mutex_exit(&dtrace_lock); if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) return (EFAULT); return (0); } case DTRACEIOC_PROBEARG: { dtrace_argdesc_t desc; dtrace_probe_t *probe; dtrace_provider_t *prov; if (copyin((void *)arg, &desc, sizeof (desc)) != 0) return (EFAULT); if (desc.dtargd_id == DTRACE_IDNONE) return (EINVAL); if (desc.dtargd_ndx == DTRACE_ARGNONE) return (EINVAL); mutex_enter(&dtrace_provider_lock); mutex_enter(&mod_lock); mutex_enter(&dtrace_lock); if (desc.dtargd_id > dtrace_nprobes) { mutex_exit(&dtrace_lock); mutex_exit(&mod_lock); mutex_exit(&dtrace_provider_lock); return (EINVAL); } if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { mutex_exit(&dtrace_lock); mutex_exit(&mod_lock); mutex_exit(&dtrace_provider_lock); return (EINVAL); } mutex_exit(&dtrace_lock); prov = probe->dtpr_provider; if (prov->dtpv_pops.dtps_getargdesc == NULL) { /* * There isn't any typed information for this probe. * Set the argument number to DTRACE_ARGNONE. */ desc.dtargd_ndx = DTRACE_ARGNONE; } else { desc.dtargd_native[0] = '\0'; desc.dtargd_xlate[0] = '\0'; desc.dtargd_mapping = desc.dtargd_ndx; prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, probe->dtpr_id, probe->dtpr_arg, &desc); } mutex_exit(&mod_lock); mutex_exit(&dtrace_provider_lock); if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) return (EFAULT); return (0); } case DTRACEIOC_GO: { processorid_t cpuid; rval = dtrace_state_go(state, &cpuid); if (rval != 0) return (rval); if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) return (EFAULT); return (0); } case DTRACEIOC_STOP: { processorid_t cpuid; mutex_enter(&dtrace_lock); rval = dtrace_state_stop(state, &cpuid); mutex_exit(&dtrace_lock); if (rval != 0) return (rval); if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) return (EFAULT); return (0); } case DTRACEIOC_DOFGET: { dof_hdr_t hdr, *dof; uint64_t len; if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) return (EFAULT); mutex_enter(&dtrace_lock); dof = dtrace_dof_create(state); mutex_exit(&dtrace_lock); len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); rval = copyout(dof, (void *)arg, len); dtrace_dof_destroy(dof); return (rval == 0 ? 
0 : EFAULT); } case DTRACEIOC_AGGSNAP: case DTRACEIOC_BUFSNAP: { dtrace_bufdesc_t desc; caddr_t cached; dtrace_buffer_t *buf; if (copyin((void *)arg, &desc, sizeof (desc)) != 0) return (EFAULT); if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) return (EINVAL); mutex_enter(&dtrace_lock); if (cmd == DTRACEIOC_BUFSNAP) { buf = &state->dts_buffer[desc.dtbd_cpu]; } else { buf = &state->dts_aggbuffer[desc.dtbd_cpu]; } if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { size_t sz = buf->dtb_offset; if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { mutex_exit(&dtrace_lock); return (EBUSY); } /* * If this buffer has already been consumed, we're * going to indicate that there's nothing left here * to consume. */ if (buf->dtb_flags & DTRACEBUF_CONSUMED) { mutex_exit(&dtrace_lock); desc.dtbd_size = 0; desc.dtbd_drops = 0; desc.dtbd_errors = 0; desc.dtbd_oldest = 0; sz = sizeof (desc); if (copyout(&desc, (void *)arg, sz) != 0) return (EFAULT); return (0); } /* * If this is a ring buffer that has wrapped, we want * to copy the whole thing out. */ if (buf->dtb_flags & DTRACEBUF_WRAPPED) { dtrace_buffer_polish(buf); sz = buf->dtb_size; } if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { mutex_exit(&dtrace_lock); return (EFAULT); } desc.dtbd_size = sz; desc.dtbd_drops = buf->dtb_drops; desc.dtbd_errors = buf->dtb_errors; desc.dtbd_oldest = buf->dtb_xamot_offset; desc.dtbd_timestamp = dtrace_gethrtime(); mutex_exit(&dtrace_lock); if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) return (EFAULT); buf->dtb_flags |= DTRACEBUF_CONSUMED; return (0); } if (buf->dtb_tomax == NULL) { ASSERT(buf->dtb_xamot == NULL); mutex_exit(&dtrace_lock); return (ENOENT); } cached = buf->dtb_tomax; ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); dtrace_xcall(desc.dtbd_cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf); state->dts_errors += buf->dtb_xamot_errors; /* * If the buffers did not actually switch, then the cross call * did not take place -- presumably because the given CPU is * not in the ready set. If this is the case, we'll return * ENOENT. */ if (buf->dtb_tomax == cached) { ASSERT(buf->dtb_xamot != cached); mutex_exit(&dtrace_lock); return (ENOENT); } ASSERT(cached == buf->dtb_xamot); /* * We have our snapshot; now copy it out. */ if (copyout(buf->dtb_xamot, desc.dtbd_data, buf->dtb_xamot_offset) != 0) { mutex_exit(&dtrace_lock); return (EFAULT); } desc.dtbd_size = buf->dtb_xamot_offset; desc.dtbd_drops = buf->dtb_xamot_drops; desc.dtbd_errors = buf->dtb_xamot_errors; desc.dtbd_oldest = 0; desc.dtbd_timestamp = buf->dtb_switched; mutex_exit(&dtrace_lock); /* * Finally, copy out the buffer description. */ if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) return (EFAULT); return (0); } case DTRACEIOC_CONF: { dtrace_conf_t conf; bzero(&conf, sizeof (conf)); conf.dtc_difversion = DIF_VERSION; conf.dtc_difintregs = DIF_DIR_NREGS; conf.dtc_diftupregs = DIF_DTR_NREGS; conf.dtc_ctfmodel = CTF_MODEL_NATIVE; if (copyout(&conf, (void *)arg, sizeof (conf)) != 0) return (EFAULT); return (0); } case DTRACEIOC_STATUS: { dtrace_status_t stat; dtrace_dstate_t *dstate; int i, j; uint64_t nerrs; /* * See the comment in dtrace_state_deadman() for the reason * for setting dts_laststatus to INT64_MAX before setting * it to the correct value. 
*/ state->dts_laststatus = INT64_MAX; dtrace_membar_producer(); state->dts_laststatus = dtrace_gethrtime(); bzero(&stat, sizeof (stat)); mutex_enter(&dtrace_lock); if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { mutex_exit(&dtrace_lock); return (ENOENT); } if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) stat.dtst_exiting = 1; nerrs = state->dts_errors; dstate = &state->dts_vstate.dtvs_dynvars; for (i = 0; i < NCPU; i++) { dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; stat.dtst_dyndrops += dcpu->dtdsc_drops; stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) stat.dtst_filled++; nerrs += state->dts_buffer[i].dtb_errors; for (j = 0; j < state->dts_nspeculations; j++) { dtrace_speculation_t *spec; dtrace_buffer_t *buf; spec = &state->dts_speculations[j]; buf = &spec->dtsp_buffer[i]; stat.dtst_specdrops += buf->dtb_xamot_drops; } } stat.dtst_specdrops_busy = state->dts_speculations_busy; stat.dtst_specdrops_unavail = state->dts_speculations_unavail; stat.dtst_stkstroverflows = state->dts_stkstroverflows; stat.dtst_dblerrors = state->dts_dblerrors; stat.dtst_killed = (state->dts_activity == DTRACE_ACTIVITY_KILLED); stat.dtst_errors = nerrs; mutex_exit(&dtrace_lock); if (copyout(&stat, (void *)arg, sizeof (stat)) != 0) return (EFAULT); return (0); } case DTRACEIOC_FORMAT: { dtrace_fmtdesc_t fmt; char *str; int len; if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0) return (EFAULT); mutex_enter(&dtrace_lock); if (fmt.dtfd_format == 0 || fmt.dtfd_format > state->dts_nformats) { mutex_exit(&dtrace_lock); return (EINVAL); } /* * Format strings are allocated contiguously and they are * never freed; if a format index is less than the number * of formats, we can assert that the format map is non-NULL * and that the format for the specified index is non-NULL. */ ASSERT(state->dts_formats != NULL); str = state->dts_formats[fmt.dtfd_format - 1]; ASSERT(str != NULL); len = strlen(str) + 1; if (len > fmt.dtfd_length) { fmt.dtfd_length = len; if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) { mutex_exit(&dtrace_lock); return (EINVAL); } } else { if (copyout(str, fmt.dtfd_string, len) != 0) { mutex_exit(&dtrace_lock); return (EINVAL); } } mutex_exit(&dtrace_lock); return (0); } default: break; } return (ENOTTY); } /*ARGSUSED*/ static int dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) { dtrace_state_t *state; switch (cmd) { case DDI_DETACH: break; case DDI_SUSPEND: return (DDI_SUCCESS); default: return (DDI_FAILURE); } mutex_enter(&cpu_lock); mutex_enter(&dtrace_provider_lock); mutex_enter(&dtrace_lock); ASSERT(dtrace_opens == 0); if (dtrace_helpers > 0) { mutex_exit(&dtrace_provider_lock); mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); return (DDI_FAILURE); } if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) { mutex_exit(&dtrace_provider_lock); mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); return (DDI_FAILURE); } dtrace_provider = NULL; if ((state = dtrace_anon_grab()) != NULL) { /* * If there were ECBs on this state, the provider should * have not been allowed to detach; assert that there is * none. */ ASSERT(state->dts_necbs == 0); dtrace_state_destroy(state); /* * If we're being detached with anonymous state, we need to * indicate to the kernel debugger that DTrace is now inactive. 
*/ (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); } bzero(&dtrace_anon, sizeof (dtrace_anon_t)); unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); dtrace_cpu_init = NULL; dtrace_helpers_cleanup = NULL; dtrace_helpers_fork = NULL; dtrace_cpustart_init = NULL; dtrace_cpustart_fini = NULL; dtrace_debugger_init = NULL; dtrace_debugger_fini = NULL; dtrace_modload = NULL; dtrace_modunload = NULL; ASSERT(dtrace_getf == 0); ASSERT(dtrace_closef == NULL); mutex_exit(&cpu_lock); kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *)); dtrace_probes = NULL; dtrace_nprobes = 0; dtrace_hash_destroy(dtrace_bymod); dtrace_hash_destroy(dtrace_byfunc); dtrace_hash_destroy(dtrace_byname); dtrace_bymod = NULL; dtrace_byfunc = NULL; dtrace_byname = NULL; kmem_cache_destroy(dtrace_state_cache); vmem_destroy(dtrace_minor); vmem_destroy(dtrace_arena); if (dtrace_toxrange != NULL) { kmem_free(dtrace_toxrange, dtrace_toxranges_max * sizeof (dtrace_toxrange_t)); dtrace_toxrange = NULL; dtrace_toxranges = 0; dtrace_toxranges_max = 0; } ddi_remove_minor_node(dtrace_devi, NULL); dtrace_devi = NULL; ddi_soft_state_fini(&dtrace_softstate); ASSERT(dtrace_vtime_references == 0); ASSERT(dtrace_opens == 0); ASSERT(dtrace_retained == NULL); mutex_exit(&dtrace_lock); mutex_exit(&dtrace_provider_lock); /* * We don't destroy the task queue until after we have dropped our * locks (taskq_destroy() may block on running tasks). To prevent * attempting to do work after we have effectively detached but before * the task queue has been destroyed, all tasks dispatched via the * task queue must check that DTrace is still attached before * performing any operation. */ taskq_destroy(dtrace_taskq); dtrace_taskq = NULL; return (DDI_SUCCESS); } #endif #ifdef illumos /*ARGSUSED*/ static int dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) { int error; switch (infocmd) { case DDI_INFO_DEVT2DEVINFO: *result = (void *)dtrace_devi; error = DDI_SUCCESS; break; case DDI_INFO_DEVT2INSTANCE: *result = (void *)0; error = DDI_SUCCESS; break; default: error = DDI_FAILURE; } return (error); } #endif #ifdef illumos static struct cb_ops dtrace_cb_ops = { dtrace_open, /* open */ dtrace_close, /* close */ nulldev, /* strategy */ nulldev, /* print */ nodev, /* dump */ nodev, /* read */ nodev, /* write */ dtrace_ioctl, /* ioctl */ nodev, /* devmap */ nodev, /* mmap */ nodev, /* segmap */ nochpoll, /* poll */ ddi_prop_op, /* cb_prop_op */ 0, /* streamtab */ D_NEW | D_MP /* Driver compatibility flag */ }; static struct dev_ops dtrace_ops = { DEVO_REV, /* devo_rev */ 0, /* refcnt */ dtrace_info, /* get_dev_info */ nulldev, /* identify */ nulldev, /* probe */ dtrace_attach, /* attach */ dtrace_detach, /* detach */ nodev, /* reset */ &dtrace_cb_ops, /* driver operations */ NULL, /* bus operations */ nodev /* dev power */ }; static struct modldrv modldrv = { &mod_driverops, /* module type (this is a pseudo driver) */ "Dynamic Tracing", /* name of module */ &dtrace_ops, /* driver ops */ }; static struct modlinkage modlinkage = { MODREV_1, (void *)&modldrv, NULL }; int _init(void) { return (mod_install(&modlinkage)); } int _info(struct modinfo *modinfop) { return (mod_info(&modlinkage, modinfop)); } int _fini(void) { return (mod_remove(&modlinkage)); } #else static d_ioctl_t dtrace_ioctl; static d_ioctl_t dtrace_ioctl_helper; static void dtrace_load(void *); static int dtrace_unload(void); static struct cdev *dtrace_dev; static struct cdev *helper_dev; void dtrace_invop_init(void); void 
dtrace_invop_uninit(void); static struct cdevsw dtrace_cdevsw = { .d_version = D_VERSION, .d_ioctl = dtrace_ioctl, .d_open = dtrace_open, .d_name = "dtrace", }; static struct cdevsw helper_cdevsw = { .d_version = D_VERSION, .d_ioctl = dtrace_ioctl_helper, .d_name = "helper", }; #include <dtrace_anon.c> #include <dtrace_ioctl.c> #include <dtrace_load.c> #include <dtrace_modevent.c> #include <dtrace_sysctl.c> #include <dtrace_unload.c> #include <dtrace_vtime.c> #include <dtrace_hacks.c> #include <dtrace_isa.c> SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL); SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL); SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL); DEV_MODULE(dtrace, dtrace_modevent, NULL); MODULE_VERSION(dtrace, 1); MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1); #endif Index: projects/clang400-import/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h =================================================================== --- projects/clang400-import/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h (revision 312719) +++ projects/clang400-import/sys/cddl/contrib/opensolaris/uts/common/sys/dtrace.h (revision 312720) @@ -1,2510 +1,2509 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2007 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ /* * Copyright (c) 2013, Joyent, Inc. All rights reserved. * Copyright (c) 2013 by Delphix. All rights reserved. */ #ifndef _SYS_DTRACE_H #define _SYS_DTRACE_H #ifdef __cplusplus extern "C" { #endif /* * DTrace Dynamic Tracing Software: Kernel Interfaces * * Note: The contents of this file are private to the implementation of the * Solaris system and DTrace subsystem and are subject to change at any time * without notice. Applications and drivers using these interfaces will fail * to run on future releases. These interfaces should not be used for any * purpose except those expressly outlined in dtrace(7D) and libdtrace(3LIB). * Please refer to the "Solaris Dynamic Tracing Guide" for more information.
*/ #ifndef _ASM #include <sys/types.h> #include <sys/modctl.h> #include <sys/processor.h> #ifdef illumos #include <sys/systm.h> #else #include <sys/cpuvar.h> #include <sys/param.h> #include <sys/linker.h> #include <sys/ioccom.h> #include <sys/ucred.h> typedef int model_t; #endif #include <sys/ctf_api.h> #ifdef illumos #include <sys/cyclic.h> #include <sys/int_limits.h> #else #include <sys/stdint.h> #endif /* * DTrace Universal Constants and Typedefs */ #define DTRACE_CPUALL -1 /* all CPUs */ #define DTRACE_IDNONE 0 /* invalid probe identifier */ #define DTRACE_EPIDNONE 0 /* invalid enabled probe identifier */ #define DTRACE_AGGIDNONE 0 /* invalid aggregation identifier */ #define DTRACE_AGGVARIDNONE 0 /* invalid aggregation variable ID */ #define DTRACE_CACHEIDNONE 0 /* invalid predicate cache */ #define DTRACE_PROVNONE 0 /* invalid provider identifier */ #define DTRACE_METAPROVNONE 0 /* invalid meta-provider identifier */ #define DTRACE_ARGNONE -1 /* invalid argument index */ #define DTRACE_PROVNAMELEN 64 #define DTRACE_MODNAMELEN 64 #define DTRACE_FUNCNAMELEN 192 #define DTRACE_NAMELEN 64 #define DTRACE_FULLNAMELEN (DTRACE_PROVNAMELEN + DTRACE_MODNAMELEN + \ DTRACE_FUNCNAMELEN + DTRACE_NAMELEN + 4) #define DTRACE_ARGTYPELEN 128 typedef uint32_t dtrace_id_t; /* probe identifier */ typedef uint32_t dtrace_epid_t; /* enabled probe identifier */ typedef uint32_t dtrace_aggid_t; /* aggregation identifier */ typedef int64_t dtrace_aggvarid_t; /* aggregation variable identifier */ typedef uint16_t dtrace_actkind_t; /* action kind */ typedef int64_t dtrace_optval_t; /* option value */ typedef uint32_t dtrace_cacheid_t; /* predicate cache identifier */ typedef enum dtrace_probespec { DTRACE_PROBESPEC_NONE = -1, DTRACE_PROBESPEC_PROVIDER = 0, DTRACE_PROBESPEC_MOD, DTRACE_PROBESPEC_FUNC, DTRACE_PROBESPEC_NAME } dtrace_probespec_t; /* * DTrace Intermediate Format (DIF) * * The following definitions describe the DTrace Intermediate Format (DIF), a * RISC-like instruction set and program encoding used to represent * predicates and actions that can be bound to DTrace probes. The constants * below defining the number of available registers are suggested minimums; the * compiler should use DTRACEIOC_CONF to dynamically obtain the number of * registers provided by the current DTrace implementation.
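 *
 * A minimal consumer-side sketch of that query (illustrative only; "fd" is
 * assumed to be an open descriptor on the dtrace pseudodevice):
 *
 *	dtrace_conf_t conf;
 *	if (ioctl(fd, DTRACEIOC_CONF, &conf) == 0) {
 *		nintregs = conf.dtc_difintregs;	(never below DIF_DIR_NREGS)
 *		ntupregs = conf.dtc_diftupregs;	(never below DIF_DTR_NREGS)
 *	}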
*/ #define DIF_VERSION_1 1 /* DIF version 1: Solaris 10 Beta */ #define DIF_VERSION_2 2 /* DIF version 2: Solaris 10 FCS */ #define DIF_VERSION DIF_VERSION_2 /* latest DIF instruction set version */ #define DIF_DIR_NREGS 8 /* number of DIF integer registers */ #define DIF_DTR_NREGS 8 /* number of DIF tuple registers */ #define DIF_OP_OR 1 /* or r1, r2, rd */ #define DIF_OP_XOR 2 /* xor r1, r2, rd */ #define DIF_OP_AND 3 /* and r1, r2, rd */ #define DIF_OP_SLL 4 /* sll r1, r2, rd */ #define DIF_OP_SRL 5 /* srl r1, r2, rd */ #define DIF_OP_SUB 6 /* sub r1, r2, rd */ #define DIF_OP_ADD 7 /* add r1, r2, rd */ #define DIF_OP_MUL 8 /* mul r1, r2, rd */ #define DIF_OP_SDIV 9 /* sdiv r1, r2, rd */ #define DIF_OP_UDIV 10 /* udiv r1, r2, rd */ #define DIF_OP_SREM 11 /* srem r1, r2, rd */ #define DIF_OP_UREM 12 /* urem r1, r2, rd */ #define DIF_OP_NOT 13 /* not r1, rd */ #define DIF_OP_MOV 14 /* mov r1, rd */ #define DIF_OP_CMP 15 /* cmp r1, r2 */ #define DIF_OP_TST 16 /* tst r1 */ #define DIF_OP_BA 17 /* ba label */ #define DIF_OP_BE 18 /* be label */ #define DIF_OP_BNE 19 /* bne label */ #define DIF_OP_BG 20 /* bg label */ #define DIF_OP_BGU 21 /* bgu label */ #define DIF_OP_BGE 22 /* bge label */ #define DIF_OP_BGEU 23 /* bgeu label */ #define DIF_OP_BL 24 /* bl label */ #define DIF_OP_BLU 25 /* blu label */ #define DIF_OP_BLE 26 /* ble label */ #define DIF_OP_BLEU 27 /* bleu label */ #define DIF_OP_LDSB 28 /* ldsb [r1], rd */ #define DIF_OP_LDSH 29 /* ldsh [r1], rd */ #define DIF_OP_LDSW 30 /* ldsw [r1], rd */ #define DIF_OP_LDUB 31 /* ldub [r1], rd */ #define DIF_OP_LDUH 32 /* lduh [r1], rd */ #define DIF_OP_LDUW 33 /* lduw [r1], rd */ #define DIF_OP_LDX 34 /* ldx [r1], rd */ #define DIF_OP_RET 35 /* ret rd */ #define DIF_OP_NOP 36 /* nop */ #define DIF_OP_SETX 37 /* setx intindex, rd */ #define DIF_OP_SETS 38 /* sets strindex, rd */ #define DIF_OP_SCMP 39 /* scmp r1, r2 */ #define DIF_OP_LDGA 40 /* ldga var, ri, rd */ #define DIF_OP_LDGS 41 /* ldgs var, rd */ #define DIF_OP_STGS 42 /* stgs var, rs */ #define DIF_OP_LDTA 43 /* ldta var, ri, rd */ #define DIF_OP_LDTS 44 /* ldts var, rd */ #define DIF_OP_STTS 45 /* stts var, rs */ #define DIF_OP_SRA 46 /* sra r1, r2, rd */ #define DIF_OP_CALL 47 /* call subr, rd */ #define DIF_OP_PUSHTR 48 /* pushtr type, rs, rr */ #define DIF_OP_PUSHTV 49 /* pushtv type, rs, rv */ #define DIF_OP_POPTS 50 /* popts */ #define DIF_OP_FLUSHTS 51 /* flushts */ #define DIF_OP_LDGAA 52 /* ldgaa var, rd */ #define DIF_OP_LDTAA 53 /* ldtaa var, rd */ #define DIF_OP_STGAA 54 /* stgaa var, rs */ #define DIF_OP_STTAA 55 /* sttaa var, rs */ #define DIF_OP_LDLS 56 /* ldls var, rd */ #define DIF_OP_STLS 57 /* stls var, rs */ #define DIF_OP_ALLOCS 58 /* allocs r1, rd */ #define DIF_OP_COPYS 59 /* copys r1, r2, rd */ #define DIF_OP_STB 60 /* stb r1, [rd] */ #define DIF_OP_STH 61 /* sth r1, [rd] */ #define DIF_OP_STW 62 /* stw r1, [rd] */ #define DIF_OP_STX 63 /* stx r1, [rd] */ #define DIF_OP_ULDSB 64 /* uldsb [r1], rd */ #define DIF_OP_ULDSH 65 /* uldsh [r1], rd */ #define DIF_OP_ULDSW 66 /* uldsw [r1], rd */ #define DIF_OP_ULDUB 67 /* uldub [r1], rd */ #define DIF_OP_ULDUH 68 /* ulduh [r1], rd */ #define DIF_OP_ULDUW 69 /* ulduw [r1], rd */ #define DIF_OP_ULDX 70 /* uldx [r1], rd */ #define DIF_OP_RLDSB 71 /* rldsb [r1], rd */ #define DIF_OP_RLDSH 72 /* rldsh [r1], rd */ #define DIF_OP_RLDSW 73 /* rldsw [r1], rd */ #define DIF_OP_RLDUB 74 /* rldub [r1], rd */ #define DIF_OP_RLDUH 75 /* rlduh [r1], rd */ #define DIF_OP_RLDUW 76 /* rlduw [r1], rd */ #define DIF_OP_RLDX 77 /* rldx 
[r1], rd */ #define DIF_OP_XLATE 78 /* xlate xlrindex, rd */ #define DIF_OP_XLARG 79 /* xlarg xlrindex, rd */ #define DIF_INTOFF_MAX 0xffff /* highest integer table offset */ #define DIF_STROFF_MAX 0xffff /* highest string table offset */ #define DIF_REGISTER_MAX 0xff /* highest register number */ #define DIF_VARIABLE_MAX 0xffff /* highest variable identifier */ #define DIF_SUBROUTINE_MAX 0xffff /* highest subroutine code */ #define DIF_VAR_ARRAY_MIN 0x0000 /* lowest numbered array variable */ #define DIF_VAR_ARRAY_UBASE 0x0080 /* lowest user-defined array */ #define DIF_VAR_ARRAY_MAX 0x00ff /* highest numbered array variable */ #define DIF_VAR_OTHER_MIN 0x0100 /* lowest numbered scalar or assc */ #define DIF_VAR_OTHER_UBASE 0x0500 /* lowest user-defined scalar or assc */ #define DIF_VAR_OTHER_MAX 0xffff /* highest numbered scalar or assc */ #define DIF_VAR_ARGS 0x0000 /* arguments array */ #define DIF_VAR_REGS 0x0001 /* registers array */ #define DIF_VAR_UREGS 0x0002 /* user registers array */ #define DIF_VAR_CURTHREAD 0x0100 /* thread pointer */ #define DIF_VAR_TIMESTAMP 0x0101 /* timestamp */ #define DIF_VAR_VTIMESTAMP 0x0102 /* virtual timestamp */ #define DIF_VAR_IPL 0x0103 /* interrupt priority level */ #define DIF_VAR_EPID 0x0104 /* enabled probe ID */ #define DIF_VAR_ID 0x0105 /* probe ID */ #define DIF_VAR_ARG0 0x0106 /* first argument */ #define DIF_VAR_ARG1 0x0107 /* second argument */ #define DIF_VAR_ARG2 0x0108 /* third argument */ #define DIF_VAR_ARG3 0x0109 /* fourth argument */ #define DIF_VAR_ARG4 0x010a /* fifth argument */ #define DIF_VAR_ARG5 0x010b /* sixth argument */ #define DIF_VAR_ARG6 0x010c /* seventh argument */ #define DIF_VAR_ARG7 0x010d /* eighth argument */ #define DIF_VAR_ARG8 0x010e /* ninth argument */ #define DIF_VAR_ARG9 0x010f /* tenth argument */ #define DIF_VAR_STACKDEPTH 0x0110 /* stack depth */ #define DIF_VAR_CALLER 0x0111 /* caller */ #define DIF_VAR_PROBEPROV 0x0112 /* probe provider */ #define DIF_VAR_PROBEMOD 0x0113 /* probe module */ #define DIF_VAR_PROBEFUNC 0x0114 /* probe function */ #define DIF_VAR_PROBENAME 0x0115 /* probe name */ #define DIF_VAR_PID 0x0116 /* process ID */ #define DIF_VAR_TID 0x0117 /* (per-process) thread ID */ #define DIF_VAR_EXECNAME 0x0118 /* name of executable */ #define DIF_VAR_ZONENAME 0x0119 /* zone name associated with process */ #define DIF_VAR_WALLTIMESTAMP 0x011a /* wall-clock timestamp */ #define DIF_VAR_USTACKDEPTH 0x011b /* user-land stack depth */ #define DIF_VAR_UCALLER 0x011c /* user-level caller */ #define DIF_VAR_PPID 0x011d /* parent process ID */ #define DIF_VAR_UID 0x011e /* process user ID */ #define DIF_VAR_GID 0x011f /* process group ID */ #define DIF_VAR_ERRNO 0x0120 /* thread errno */ #define DIF_VAR_EXECARGS 0x0121 /* process arguments */ #ifndef illumos #define DIF_VAR_CPU 0x0200 #endif #define DIF_SUBR_RAND 0 #define DIF_SUBR_MUTEX_OWNED 1 #define DIF_SUBR_MUTEX_OWNER 2 #define DIF_SUBR_MUTEX_TYPE_ADAPTIVE 3 #define DIF_SUBR_MUTEX_TYPE_SPIN 4 #define DIF_SUBR_RW_READ_HELD 5 #define DIF_SUBR_RW_WRITE_HELD 6 #define DIF_SUBR_RW_ISWRITER 7 #define DIF_SUBR_COPYIN 8 #define DIF_SUBR_COPYINSTR 9 #define DIF_SUBR_SPECULATION 10 #define DIF_SUBR_PROGENYOF 11 #define DIF_SUBR_STRLEN 12 #define DIF_SUBR_COPYOUT 13 #define DIF_SUBR_COPYOUTSTR 14 #define DIF_SUBR_ALLOCA 15 #define DIF_SUBR_BCOPY 16 #define DIF_SUBR_COPYINTO 17 #define DIF_SUBR_MSGDSIZE 18 #define DIF_SUBR_MSGSIZE 19 #define DIF_SUBR_GETMAJOR 20 #define DIF_SUBR_GETMINOR 21 #define DIF_SUBR_DDI_PATHNAME 22 #define DIF_SUBR_STRJOIN 23 
#define DIF_SUBR_LLTOSTR 24 #define DIF_SUBR_BASENAME 25 #define DIF_SUBR_DIRNAME 26 #define DIF_SUBR_CLEANPATH 27 #define DIF_SUBR_STRCHR 28 #define DIF_SUBR_STRRCHR 29 #define DIF_SUBR_STRSTR 30 #define DIF_SUBR_STRTOK 31 #define DIF_SUBR_SUBSTR 32 #define DIF_SUBR_INDEX 33 #define DIF_SUBR_RINDEX 34 #define DIF_SUBR_HTONS 35 #define DIF_SUBR_HTONL 36 #define DIF_SUBR_HTONLL 37 #define DIF_SUBR_NTOHS 38 #define DIF_SUBR_NTOHL 39 #define DIF_SUBR_NTOHLL 40 #define DIF_SUBR_INET_NTOP 41 #define DIF_SUBR_INET_NTOA 42 #define DIF_SUBR_INET_NTOA6 43 #define DIF_SUBR_TOUPPER 44 #define DIF_SUBR_TOLOWER 45 #define DIF_SUBR_MEMREF 46 #define DIF_SUBR_SX_SHARED_HELD 47 #define DIF_SUBR_SX_EXCLUSIVE_HELD 48 #define DIF_SUBR_SX_ISEXCLUSIVE 49 #define DIF_SUBR_MEMSTR 50 #define DIF_SUBR_GETF 51 #define DIF_SUBR_JSON 52 #define DIF_SUBR_STRTOLL 53 #define DIF_SUBR_MAX 53 /* max subroutine value */ typedef uint32_t dif_instr_t; #define DIF_INSTR_OP(i) (((i) >> 24) & 0xff) #define DIF_INSTR_R1(i) (((i) >> 16) & 0xff) #define DIF_INSTR_R2(i) (((i) >> 8) & 0xff) #define DIF_INSTR_RD(i) ((i) & 0xff) #define DIF_INSTR_RS(i) ((i) & 0xff) #define DIF_INSTR_LABEL(i) ((i) & 0xffffff) #define DIF_INSTR_VAR(i) (((i) >> 8) & 0xffff) #define DIF_INSTR_INTEGER(i) (((i) >> 8) & 0xffff) #define DIF_INSTR_STRING(i) (((i) >> 8) & 0xffff) #define DIF_INSTR_SUBR(i) (((i) >> 8) & 0xffff) #define DIF_INSTR_TYPE(i) (((i) >> 16) & 0xff) #define DIF_INSTR_XLREF(i) (((i) >> 8) & 0xffff) #define DIF_INSTR_FMT(op, r1, r2, d) \ (((op) << 24) | ((r1) << 16) | ((r2) << 8) | (d)) #define DIF_INSTR_NOT(r1, d) (DIF_INSTR_FMT(DIF_OP_NOT, r1, 0, d)) #define DIF_INSTR_MOV(r1, d) (DIF_INSTR_FMT(DIF_OP_MOV, r1, 0, d)) #define DIF_INSTR_CMP(op, r1, r2) (DIF_INSTR_FMT(op, r1, r2, 0)) #define DIF_INSTR_TST(r1) (DIF_INSTR_FMT(DIF_OP_TST, r1, 0, 0)) #define DIF_INSTR_BRANCH(op, label) (((op) << 24) | (label)) #define DIF_INSTR_LOAD(op, r1, d) (DIF_INSTR_FMT(op, r1, 0, d)) #define DIF_INSTR_STORE(op, r1, d) (DIF_INSTR_FMT(op, r1, 0, d)) #define DIF_INSTR_SETX(i, d) ((DIF_OP_SETX << 24) | ((i) << 8) | (d)) #define DIF_INSTR_SETS(s, d) ((DIF_OP_SETS << 24) | ((s) << 8) | (d)) #define DIF_INSTR_RET(d) (DIF_INSTR_FMT(DIF_OP_RET, 0, 0, d)) #define DIF_INSTR_NOP (DIF_OP_NOP << 24) #define DIF_INSTR_LDA(op, v, r, d) (DIF_INSTR_FMT(op, v, r, d)) #define DIF_INSTR_LDV(op, v, d) (((op) << 24) | ((v) << 8) | (d)) #define DIF_INSTR_STV(op, v, rs) (((op) << 24) | ((v) << 8) | (rs)) #define DIF_INSTR_CALL(s, d) ((DIF_OP_CALL << 24) | ((s) << 8) | (d)) #define DIF_INSTR_PUSHTS(op, t, r2, rs) (DIF_INSTR_FMT(op, t, r2, rs)) #define DIF_INSTR_POPTS (DIF_OP_POPTS << 24) #define DIF_INSTR_FLUSHTS (DIF_OP_FLUSHTS << 24) #define DIF_INSTR_ALLOCS(r1, d) (DIF_INSTR_FMT(DIF_OP_ALLOCS, r1, 0, d)) #define DIF_INSTR_COPYS(r1, r2, d) (DIF_INSTR_FMT(DIF_OP_COPYS, r1, r2, d)) #define DIF_INSTR_XLATE(op, r, d) (((op) << 24) | ((r) << 8) | (d)) #define DIF_REG_R0 0 /* %r0 is always set to zero */ /* * A DTrace Intermediate Format Type (DIF Type) is used to represent the types * of variables, function and associative array arguments, and the return type * for each DIF object (shown below). It contains a description of the type, * its size in bytes, and a module identifier. 
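 *
 * For example (a sketch, not taken from the implementation), a DIFO whose
 * return value is a D string bounded by a 256-byte "strsize" would carry a
 * return type along these lines:
 *
 *	dtrace_diftype_t rtype = {
 *		.dtdt_kind = DIF_TYPE_STRING,	(a D string, not a CTF type)
 *		.dtdt_flags = DIF_TF_BYREF,	(strings are passed by reference)
 *		.dtdt_size = 256
 *	};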
*/ typedef struct dtrace_diftype { uint8_t dtdt_kind; /* type kind (see below) */ uint8_t dtdt_ckind; /* type kind in CTF */ uint8_t dtdt_flags; /* type flags (see below) */ uint8_t dtdt_pad; /* reserved for future use */ uint32_t dtdt_size; /* type size in bytes (unless string) */ } dtrace_diftype_t; #define DIF_TYPE_CTF 0 /* type is a CTF type */ #define DIF_TYPE_STRING 1 /* type is a D string */ #define DIF_TF_BYREF 0x1 /* type is passed by reference */ #define DIF_TF_BYUREF 0x2 /* user type is passed by reference */ /* * A DTrace Intermediate Format variable record is used to describe each of the * variables referenced by a given DIF object. It contains an integer variable * identifier along with variable scope and properties, as shown below. The * size of this structure must be sizeof (int) aligned. */ typedef struct dtrace_difv { uint32_t dtdv_name; /* variable name index in dtdo_strtab */ uint32_t dtdv_id; /* variable reference identifier */ uint8_t dtdv_kind; /* variable kind (see below) */ uint8_t dtdv_scope; /* variable scope (see below) */ uint16_t dtdv_flags; /* variable flags (see below) */ dtrace_diftype_t dtdv_type; /* variable type (see above) */ } dtrace_difv_t; #define DIFV_KIND_ARRAY 0 /* variable is an array of quantities */ #define DIFV_KIND_SCALAR 1 /* variable is a scalar quantity */ #define DIFV_SCOPE_GLOBAL 0 /* variable has global scope */ #define DIFV_SCOPE_THREAD 1 /* variable has thread scope */ #define DIFV_SCOPE_LOCAL 2 /* variable has local scope */ #define DIFV_F_REF 0x1 /* variable is referenced by DIFO */ #define DIFV_F_MOD 0x2 /* variable is written by DIFO */ /* * DTrace Actions * * The upper byte determines the class of the action; the low bytes determine * the specific action within that class. The classes of actions are as * follows: * * [ no class ] <= May record process- or kernel-related data * DTRACEACT_PROC <= Only records process-related data * DTRACEACT_PROC_DESTRUCTIVE <= Potentially destructive to processes * DTRACEACT_KERNEL <= Only records kernel-related data * DTRACEACT_KERNEL_DESTRUCTIVE <= Potentially destructive to the kernel * DTRACEACT_SPECULATIVE <= Speculation-related action * DTRACEACT_AGGREGATION <= Aggregating action */ #define DTRACEACT_NONE 0 /* no action */ #define DTRACEACT_DIFEXPR 1 /* action is DIF expression */ #define DTRACEACT_EXIT 2 /* exit() action */ #define DTRACEACT_PRINTF 3 /* printf() action */ #define DTRACEACT_PRINTA 4 /* printa() action */ #define DTRACEACT_LIBACT 5 /* library-controlled action */ #define DTRACEACT_TRACEMEM 6 /* tracemem() action */ #define DTRACEACT_TRACEMEM_DYNSIZE 7 /* dynamic tracemem() size */ #define DTRACEACT_PRINTM 8 /* printm() action (BSD) */ #define DTRACEACT_PROC 0x0100 #define DTRACEACT_USTACK (DTRACEACT_PROC + 1) #define DTRACEACT_JSTACK (DTRACEACT_PROC + 2) #define DTRACEACT_USYM (DTRACEACT_PROC + 3) #define DTRACEACT_UMOD (DTRACEACT_PROC + 4) #define DTRACEACT_UADDR (DTRACEACT_PROC + 5) #define DTRACEACT_PROC_DESTRUCTIVE 0x0200 #define DTRACEACT_STOP (DTRACEACT_PROC_DESTRUCTIVE + 1) #define DTRACEACT_RAISE (DTRACEACT_PROC_DESTRUCTIVE + 2) #define DTRACEACT_SYSTEM (DTRACEACT_PROC_DESTRUCTIVE + 3) #define DTRACEACT_FREOPEN (DTRACEACT_PROC_DESTRUCTIVE + 4) #define DTRACEACT_PROC_CONTROL 0x0300 #define DTRACEACT_KERNEL 0x0400 #define DTRACEACT_STACK (DTRACEACT_KERNEL + 1) #define DTRACEACT_SYM (DTRACEACT_KERNEL + 2) #define DTRACEACT_MOD (DTRACEACT_KERNEL + 3) #define DTRACEACT_KERNEL_DESTRUCTIVE 0x0500 #define DTRACEACT_BREAKPOINT (DTRACEACT_KERNEL_DESTRUCTIVE + 1) #define DTRACEACT_PANIC (DTRACEACT_KERNEL_DESTRUCTIVE + 2) #define DTRACEACT_CHILL (DTRACEACT_KERNEL_DESTRUCTIVE + 3) #define DTRACEACT_SPECULATIVE 0x0600 #define DTRACEACT_SPECULATE (DTRACEACT_SPECULATIVE + 1) #define DTRACEACT_COMMIT (DTRACEACT_SPECULATIVE + 2) #define DTRACEACT_DISCARD (DTRACEACT_SPECULATIVE + 3) #define DTRACEACT_CLASS(x) ((x) & 0xff00) #define DTRACEACT_ISDESTRUCTIVE(x) \ (DTRACEACT_CLASS(x) == DTRACEACT_PROC_DESTRUCTIVE || \ DTRACEACT_CLASS(x) == DTRACEACT_KERNEL_DESTRUCTIVE) #define DTRACEACT_ISSPECULATIVE(x) \ (DTRACEACT_CLASS(x) == DTRACEACT_SPECULATIVE) #define DTRACEACT_ISPRINTFLIKE(x) \ ((x) == DTRACEACT_PRINTF || (x) == DTRACEACT_PRINTA || \ (x) == DTRACEACT_SYSTEM || (x) == DTRACEACT_FREOPEN) /* * DTrace Aggregating Actions * * These are functions f(x) for which the following is true: * * f(f(x_0) U f(x_1) U ... U f(x_n)) = f(x_0 U x_1 U ... U x_n) * * where x_n is a set of arbitrary data. Aggregating actions are in their own * DTrace action class, DTRACEACT_AGGREGATION. The macros provided here allow * for easier processing of the aggregation argument and data payload for a few * aggregating actions (notably: quantize(), lquantize(), and ustack()). */ #define DTRACEACT_AGGREGATION 0x0700 #define DTRACEAGG_COUNT (DTRACEACT_AGGREGATION + 1) #define DTRACEAGG_MIN (DTRACEACT_AGGREGATION + 2) #define DTRACEAGG_MAX (DTRACEACT_AGGREGATION + 3) #define DTRACEAGG_AVG (DTRACEACT_AGGREGATION + 4) #define DTRACEAGG_SUM (DTRACEACT_AGGREGATION + 5) #define DTRACEAGG_STDDEV (DTRACEACT_AGGREGATION + 6) #define DTRACEAGG_QUANTIZE (DTRACEACT_AGGREGATION + 7) #define DTRACEAGG_LQUANTIZE (DTRACEACT_AGGREGATION + 8) #define DTRACEAGG_LLQUANTIZE (DTRACEACT_AGGREGATION + 9) #define DTRACEACT_ISAGG(x) \ (DTRACEACT_CLASS(x) == DTRACEACT_AGGREGATION) #define DTRACE_QUANTIZE_NBUCKETS \ (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) #define DTRACE_QUANTIZE_ZEROBUCKET ((sizeof (uint64_t) * NBBY) - 1) #define DTRACE_QUANTIZE_BUCKETVAL(buck) \ (int64_t)((buck) < DTRACE_QUANTIZE_ZEROBUCKET ? \ -(1LL << (DTRACE_QUANTIZE_ZEROBUCKET - 1 - (buck))) : \ (buck) == DTRACE_QUANTIZE_ZEROBUCKET ?
0 : \ 1LL << ((buck) - DTRACE_QUANTIZE_ZEROBUCKET - 1)) #define DTRACE_LQUANTIZE_STEPSHIFT 48 #define DTRACE_LQUANTIZE_STEPMASK ((uint64_t)UINT16_MAX << 48) #define DTRACE_LQUANTIZE_LEVELSHIFT 32 #define DTRACE_LQUANTIZE_LEVELMASK ((uint64_t)UINT16_MAX << 32) #define DTRACE_LQUANTIZE_BASESHIFT 0 #define DTRACE_LQUANTIZE_BASEMASK UINT32_MAX #define DTRACE_LQUANTIZE_STEP(x) \ (uint16_t)(((x) & DTRACE_LQUANTIZE_STEPMASK) >> \ DTRACE_LQUANTIZE_STEPSHIFT) #define DTRACE_LQUANTIZE_LEVELS(x) \ (uint16_t)(((x) & DTRACE_LQUANTIZE_LEVELMASK) >> \ DTRACE_LQUANTIZE_LEVELSHIFT) #define DTRACE_LQUANTIZE_BASE(x) \ (int32_t)(((x) & DTRACE_LQUANTIZE_BASEMASK) >> \ DTRACE_LQUANTIZE_BASESHIFT) #define DTRACE_LLQUANTIZE_FACTORSHIFT 48 #define DTRACE_LLQUANTIZE_FACTORMASK ((uint64_t)UINT16_MAX << 48) #define DTRACE_LLQUANTIZE_LOWSHIFT 32 #define DTRACE_LLQUANTIZE_LOWMASK ((uint64_t)UINT16_MAX << 32) #define DTRACE_LLQUANTIZE_HIGHSHIFT 16 #define DTRACE_LLQUANTIZE_HIGHMASK ((uint64_t)UINT16_MAX << 16) #define DTRACE_LLQUANTIZE_NSTEPSHIFT 0 #define DTRACE_LLQUANTIZE_NSTEPMASK UINT16_MAX #define DTRACE_LLQUANTIZE_FACTOR(x) \ (uint16_t)(((x) & DTRACE_LLQUANTIZE_FACTORMASK) >> \ DTRACE_LLQUANTIZE_FACTORSHIFT) #define DTRACE_LLQUANTIZE_LOW(x) \ (uint16_t)(((x) & DTRACE_LLQUANTIZE_LOWMASK) >> \ DTRACE_LLQUANTIZE_LOWSHIFT) #define DTRACE_LLQUANTIZE_HIGH(x) \ (uint16_t)(((x) & DTRACE_LLQUANTIZE_HIGHMASK) >> \ DTRACE_LLQUANTIZE_HIGHSHIFT) #define DTRACE_LLQUANTIZE_NSTEP(x) \ (uint16_t)(((x) & DTRACE_LLQUANTIZE_NSTEPMASK) >> \ DTRACE_LLQUANTIZE_NSTEPSHIFT) #define DTRACE_USTACK_NFRAMES(x) (uint32_t)((x) & UINT32_MAX) #define DTRACE_USTACK_STRSIZE(x) (uint32_t)((x) >> 32) #define DTRACE_USTACK_ARG(x, y) \ ((((uint64_t)(y)) << 32) | ((x) & UINT32_MAX)) #ifndef _LP64 #if BYTE_ORDER == _BIG_ENDIAN #define DTRACE_PTR(type, name) uint32_t name##pad; type *name #else #define DTRACE_PTR(type, name) type *name; uint32_t name##pad #endif #else #define DTRACE_PTR(type, name) type *name #endif /* * DTrace Object Format (DOF) * * DTrace programs can be persistently encoded in the DOF format so that they * may be embedded in other programs (for example, in an ELF file) or in the * dtrace driver configuration file for use in anonymous tracing. The DOF * format is versioned and extensible so that it can be revised and so that * internal data structures can be modified or extended compatibly. All DOF * structures use fixed-size types, so the 32-bit and 64-bit representations * are identical and consumers can use either data model transparently. * * The file layout is structured as follows: * * +---------------+-------------------+----- ... ----+---- ... ------+ * | dof_hdr_t | dof_sec_t[ ... ] | loadable | non-loadable | * | (file header) | (section headers) | section data | section data | * +---------------+-------------------+----- ... ----+---- ... ------+ * |<------------ dof_hdr.dofh_loadsz --------------->| | * |<------------ dof_hdr.dofh_filesz ------------------------------->| * * The file header stores meta-data including a magic number, data model for * the instrumentation, data encoding, and properties of the DIF code within. * The header describes its own size and the size of the section headers. By * convention, an array of section headers follows the file header, and then * the data for all loadable sections and unloadable sections. This permits * consumer code to easily download the headers and all loadable data into the * DTrace driver in one contiguous chunk, omitting other extraneous sections. 
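 *
 * As a sketch of the layout above (illustrative only; "base" is assumed to
 * be the address of a mapped DOF image and "i" a section index), section
 * header "i" and the loadable prefix are located as:
 *
 *	dof_hdr_t *dofh = (dof_hdr_t *)base;
 *	dof_sec_t *sec = (dof_sec_t *)(base + dofh->dofh_secoff +
 *	    i * dofh->dofh_secsize);
 *	(the first dofh->dofh_loadsz bytes form the loadable portion that is
 *	handed to the driver in one contiguous chunk)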
* * The section headers describe the size, offset, alignment, and section type * for each section. Sections are described using a set of #defines that tell * the consumer what kind of data is expected. Sections can contain links to * other sections by storing a dof_secidx_t, an index into the section header * array, inside of the section data structures. The section header includes * an entry size so that sections with data arrays can grow their structures. * * The DOF data itself can contain many snippets of DIF (i.e. >1 DIFOs), which * are represented themselves as a collection of related DOF sections. This * permits us to change the set of sections associated with a DIFO over time, * and also permits us to encode DIFOs that contain different sets of sections. * When a DOF section wants to refer to a DIFO, it stores the dof_secidx_t of a * section of type DOF_SECT_DIFOHDR. This section's data is then an array of * dof_secidx_t's which in turn denote the sections associated with this DIFO. * * This loose coupling of the file structure (header and sections) to the * structure of the DTrace program itself (ECB descriptions, action * descriptions, and DIFOs) permits activities such as relocation processing * to occur in a single pass without having to understand D program structure. * * Finally, strings are always stored in ELF-style string tables along with a * string table section index and string table offset. Therefore strings in * DOF are always arbitrary-length and not bound to the current implementation. */ #define DOF_ID_SIZE 16 /* total size of dofh_ident[] in bytes */ typedef struct dof_hdr { uint8_t dofh_ident[DOF_ID_SIZE]; /* identification bytes (see below) */ uint32_t dofh_flags; /* file attribute flags (if any) */ uint32_t dofh_hdrsize; /* size of file header in bytes */ uint32_t dofh_secsize; /* size of section header in bytes */ uint32_t dofh_secnum; /* number of section headers */ uint64_t dofh_secoff; /* file offset of section headers */ uint64_t dofh_loadsz; /* file size of loadable portion */ uint64_t dofh_filesz; /* file size of entire DOF file */ uint64_t dofh_pad; /* reserved for future use */ } dof_hdr_t; #define DOF_ID_MAG0 0 /* first byte of magic number */ #define DOF_ID_MAG1 1 /* second byte of magic number */ #define DOF_ID_MAG2 2 /* third byte of magic number */ #define DOF_ID_MAG3 3 /* fourth byte of magic number */ #define DOF_ID_MODEL 4 /* DOF data model (see below) */ #define DOF_ID_ENCODING 5 /* DOF data encoding (see below) */ #define DOF_ID_VERSION 6 /* DOF file format major version (see below) */ #define DOF_ID_DIFVERS 7 /* DIF instruction set version */ #define DOF_ID_DIFIREG 8 /* DIF integer registers used by compiler */ #define DOF_ID_DIFTREG 9 /* DIF tuple registers used by compiler */ #define DOF_ID_PAD 10 /* start of padding bytes (all zeroes) */ #define DOF_MAG_MAG0 0x7F /* DOF_ID_MAG[0-3] */ #define DOF_MAG_MAG1 'D' #define DOF_MAG_MAG2 'O' #define DOF_MAG_MAG3 'F' #define DOF_MAG_STRING "\177DOF" #define DOF_MAG_STRLEN 4 #define DOF_MODEL_NONE 0 /* DOF_ID_MODEL */ #define DOF_MODEL_ILP32 1 #define DOF_MODEL_LP64 2 #ifdef _LP64 #define DOF_MODEL_NATIVE DOF_MODEL_LP64 #else #define DOF_MODEL_NATIVE DOF_MODEL_ILP32 #endif #define DOF_ENCODE_NONE 0 /* DOF_ID_ENCODING */ #define DOF_ENCODE_LSB 1 #define DOF_ENCODE_MSB 2 #if BYTE_ORDER == _BIG_ENDIAN #define DOF_ENCODE_NATIVE DOF_ENCODE_MSB #else #define DOF_ENCODE_NATIVE DOF_ENCODE_LSB #endif #define DOF_VERSION_1 1 /* DOF version 1: Solaris 10 FCS */ #define DOF_VERSION_2 2 /* DOF version 2: 
Solaris Express 6/06 */ #define DOF_VERSION DOF_VERSION_2 /* Latest DOF version */ #define DOF_FL_VALID 0 /* mask of all valid dofh_flags bits */ typedef uint32_t dof_secidx_t; /* section header table index type */ typedef uint32_t dof_stridx_t; /* string table index type */ #define DOF_SECIDX_NONE (-1U) /* null value for section indices */ #define DOF_STRIDX_NONE (-1U) /* null value for string indices */ typedef struct dof_sec { uint32_t dofs_type; /* section type (see below) */ uint32_t dofs_align; /* section data memory alignment */ uint32_t dofs_flags; /* section flags (if any) */ uint32_t dofs_entsize; /* size of section entry (if table) */ uint64_t dofs_offset; /* offset of section data within file */ uint64_t dofs_size; /* size of section data in bytes */ } dof_sec_t; #define DOF_SECT_NONE 0 /* null section */ #define DOF_SECT_COMMENTS 1 /* compiler comments */ #define DOF_SECT_SOURCE 2 /* D program source code */ #define DOF_SECT_ECBDESC 3 /* dof_ecbdesc_t */ #define DOF_SECT_PROBEDESC 4 /* dof_probedesc_t */ #define DOF_SECT_ACTDESC 5 /* dof_actdesc_t array */ #define DOF_SECT_DIFOHDR 6 /* dof_difohdr_t (variable length) */ #define DOF_SECT_DIF 7 /* uint32_t array of byte code */ #define DOF_SECT_STRTAB 8 /* string table */ #define DOF_SECT_VARTAB 9 /* dtrace_difv_t array */ #define DOF_SECT_RELTAB 10 /* dof_relodesc_t array */ #define DOF_SECT_TYPTAB 11 /* dtrace_diftype_t array */ #define DOF_SECT_URELHDR 12 /* dof_relohdr_t (user relocations) */ #define DOF_SECT_KRELHDR 13 /* dof_relohdr_t (kernel relocations) */ #define DOF_SECT_OPTDESC 14 /* dof_optdesc_t array */ #define DOF_SECT_PROVIDER 15 /* dof_provider_t */ #define DOF_SECT_PROBES 16 /* dof_probe_t array */ #define DOF_SECT_PRARGS 17 /* uint8_t array (probe arg mappings) */ #define DOF_SECT_PROFFS 18 /* uint32_t array (probe arg offsets) */ #define DOF_SECT_INTTAB 19 /* uint64_t array */ #define DOF_SECT_UTSNAME 20 /* struct utsname */ #define DOF_SECT_XLTAB 21 /* dof_xlref_t array */ #define DOF_SECT_XLMEMBERS 22 /* dof_xlmember_t array */ #define DOF_SECT_XLIMPORT 23 /* dof_xlator_t */ #define DOF_SECT_XLEXPORT 24 /* dof_xlator_t */ #define DOF_SECT_PREXPORT 25 /* dof_secidx_t array (exported objs) */ #define DOF_SECT_PRENOFFS 26 /* uint32_t array (enabled offsets) */ #define DOF_SECF_LOAD 1 /* section should be loaded */ #define DOF_SEC_ISLOADABLE(x) \ (((x) == DOF_SECT_ECBDESC) || ((x) == DOF_SECT_PROBEDESC) || \ ((x) == DOF_SECT_ACTDESC) || ((x) == DOF_SECT_DIFOHDR) || \ ((x) == DOF_SECT_DIF) || ((x) == DOF_SECT_STRTAB) || \ ((x) == DOF_SECT_VARTAB) || ((x) == DOF_SECT_RELTAB) || \ ((x) == DOF_SECT_TYPTAB) || ((x) == DOF_SECT_URELHDR) || \ ((x) == DOF_SECT_KRELHDR) || ((x) == DOF_SECT_OPTDESC) || \ ((x) == DOF_SECT_PROVIDER) || ((x) == DOF_SECT_PROBES) || \ ((x) == DOF_SECT_PRARGS) || ((x) == DOF_SECT_PROFFS) || \ ((x) == DOF_SECT_INTTAB) || ((x) == DOF_SECT_XLTAB) || \ ((x) == DOF_SECT_XLMEMBERS) || ((x) == DOF_SECT_XLIMPORT) || \ ((x) == DOF_SECT_XLEXPORT) || ((x) == DOF_SECT_PREXPORT) || \ ((x) == DOF_SECT_PRENOFFS)) typedef struct dof_ecbdesc { dof_secidx_t dofe_probes; /* link to DOF_SECT_PROBEDESC */ dof_secidx_t dofe_pred; /* link to DOF_SECT_DIFOHDR */ dof_secidx_t dofe_actions; /* link to DOF_SECT_ACTDESC */ uint32_t dofe_pad; /* reserved for future use */ uint64_t dofe_uarg; /* user-supplied library argument */ } dof_ecbdesc_t; typedef struct dof_probedesc { dof_secidx_t dofp_strtab; /* link to DOF_SECT_STRTAB section */ dof_stridx_t dofp_provider; /* provider string */ dof_stridx_t dofp_mod; /* module 
string */ dof_stridx_t dofp_func; /* function string */ dof_stridx_t dofp_name; /* name string */ uint32_t dofp_id; /* probe identifier (or zero) */ } dof_probedesc_t; typedef struct dof_actdesc { dof_secidx_t dofa_difo; /* link to DOF_SECT_DIFOHDR */ dof_secidx_t dofa_strtab; /* link to DOF_SECT_STRTAB section */ uint32_t dofa_kind; /* action kind (DTRACEACT_* constant) */ uint32_t dofa_ntuple; /* number of subsequent tuple actions */ uint64_t dofa_arg; /* kind-specific argument */ uint64_t dofa_uarg; /* user-supplied argument */ } dof_actdesc_t; typedef struct dof_difohdr { dtrace_diftype_t dofd_rtype; /* return type for this fragment */ dof_secidx_t dofd_links[1]; /* variable length array of indices */ } dof_difohdr_t; typedef struct dof_relohdr { dof_secidx_t dofr_strtab; /* link to DOF_SECT_STRTAB for names */ dof_secidx_t dofr_relsec; /* link to DOF_SECT_RELTAB for relos */ dof_secidx_t dofr_tgtsec; /* link to section we are relocating */ } dof_relohdr_t; typedef struct dof_relodesc { dof_stridx_t dofr_name; /* string name of relocation symbol */ uint32_t dofr_type; /* relo type (DOF_RELO_* constant) */ uint64_t dofr_offset; /* byte offset for relocation */ uint64_t dofr_data; /* additional type-specific data */ } dof_relodesc_t; #define DOF_RELO_NONE 0 /* empty relocation entry */ #define DOF_RELO_SETX 1 /* relocate setx value */ typedef struct dof_optdesc { uint32_t dofo_option; /* option identifier */ dof_secidx_t dofo_strtab; /* string table, if string option */ uint64_t dofo_value; /* option value or string index */ } dof_optdesc_t; typedef uint32_t dof_attr_t; /* encoded stability attributes */ #define DOF_ATTR(n, d, c) (((n) << 24) | ((d) << 16) | ((c) << 8)) #define DOF_ATTR_NAME(a) (((a) >> 24) & 0xff) #define DOF_ATTR_DATA(a) (((a) >> 16) & 0xff) #define DOF_ATTR_CLASS(a) (((a) >> 8) & 0xff) typedef struct dof_provider { dof_secidx_t dofpv_strtab; /* link to DOF_SECT_STRTAB section */ dof_secidx_t dofpv_probes; /* link to DOF_SECT_PROBES section */ dof_secidx_t dofpv_prargs; /* link to DOF_SECT_PRARGS section */ dof_secidx_t dofpv_proffs; /* link to DOF_SECT_PROFFS section */ dof_stridx_t dofpv_name; /* provider name string */ dof_attr_t dofpv_provattr; /* provider attributes */ dof_attr_t dofpv_modattr; /* module attributes */ dof_attr_t dofpv_funcattr; /* function attributes */ dof_attr_t dofpv_nameattr; /* name attributes */ dof_attr_t dofpv_argsattr; /* args attributes */ dof_secidx_t dofpv_prenoffs; /* link to DOF_SECT_PRENOFFS section */ } dof_provider_t; typedef struct dof_probe { uint64_t dofpr_addr; /* probe base address or offset */ dof_stridx_t dofpr_func; /* probe function string */ dof_stridx_t dofpr_name; /* probe name string */ dof_stridx_t dofpr_nargv; /* native argument type strings */ dof_stridx_t dofpr_xargv; /* translated argument type strings */ uint32_t dofpr_argidx; /* index of first argument mapping */ uint32_t dofpr_offidx; /* index of first offset entry */ uint8_t dofpr_nargc; /* native argument count */ uint8_t dofpr_xargc; /* translated argument count */ uint16_t dofpr_noffs; /* number of offset entries for probe */ uint32_t dofpr_enoffidx; /* index of first is-enabled offset */ uint16_t dofpr_nenoffs; /* number of is-enabled offsets */ uint16_t dofpr_pad1; /* reserved for future use */ uint32_t dofpr_pad2; /* reserved for future use */ } dof_probe_t; typedef struct dof_xlator { dof_secidx_t dofxl_members; /* link to DOF_SECT_XLMEMBERS section */ dof_secidx_t dofxl_strtab; /* link to DOF_SECT_STRTAB section */ dof_stridx_t dofxl_argv; /* input 
parameter type strings */ uint32_t dofxl_argc; /* input parameter list length */ dof_stridx_t dofxl_type; /* output type string name */ dof_attr_t dofxl_attr; /* output stability attributes */ } dof_xlator_t; typedef struct dof_xlmember { dof_secidx_t dofxm_difo; /* member link to DOF_SECT_DIFOHDR */ dof_stridx_t dofxm_name; /* member name */ dtrace_diftype_t dofxm_type; /* member type */ } dof_xlmember_t; typedef struct dof_xlref { dof_secidx_t dofxr_xlator; /* link to DOF_SECT_XLATORS section */ uint32_t dofxr_member; /* index of referenced dof_xlmember */ uint32_t dofxr_argn; /* index of argument for DIF_OP_XLARG */ } dof_xlref_t; /* * DTrace Intermediate Format Object (DIFO) * * A DIFO is used to store the compiled DIF for a D expression, its return * type, and its string and variable tables. The string table is a single * buffer of character data into which sets instructions and variable * references can reference strings using a byte offset. The variable table * is an array of dtrace_difv_t structures that describe the name and type of * each variable and the id used in the DIF code. This structure is described * above in the DIF section of this header file. The DIFO is used at both * user-level (in the library) and in the kernel, but the structure is never * passed between the two: the DOF structures form the only interface. As a * result, the definition can change depending on the presence of _KERNEL. */ typedef struct dtrace_difo { dif_instr_t *dtdo_buf; /* instruction buffer */ uint64_t *dtdo_inttab; /* integer table (optional) */ char *dtdo_strtab; /* string table (optional) */ dtrace_difv_t *dtdo_vartab; /* variable table (optional) */ uint_t dtdo_len; /* length of instruction buffer */ uint_t dtdo_intlen; /* length of integer table */ uint_t dtdo_strlen; /* length of string table */ uint_t dtdo_varlen; /* length of variable table */ dtrace_diftype_t dtdo_rtype; /* return type */ uint_t dtdo_refcnt; /* owner reference count */ uint_t dtdo_destructive; /* invokes destructive subroutines */ #ifndef _KERNEL dof_relodesc_t *dtdo_kreltab; /* kernel relocations */ dof_relodesc_t *dtdo_ureltab; /* user relocations */ struct dt_node **dtdo_xlmtab; /* translator references */ uint_t dtdo_krelen; /* length of krelo table */ uint_t dtdo_urelen; /* length of urelo table */ uint_t dtdo_xlmlen; /* length of translator table */ #endif } dtrace_difo_t; /* * DTrace Enabling Description Structures * * When DTrace is tracking the description of a DTrace enabling entity (probe, * predicate, action, ECB, record, etc.), it does so in a description * structure. These structures all end in "desc", and are used at both * user-level and in the kernel -- but (with the exception of * dtrace_probedesc_t) they are never passed between them. Typically, * user-level will use the description structures when assembling an enabling. * It will then distill those description structures into a DOF object (see * above), and send it into the kernel. The kernel will again use the * description structures to create a description of the enabling as it reads * the DOF. When the description is complete, the enabling will be actually * created -- turning it into the structures that represent the enabling * instead of merely describing it. Not surprisingly, the description * structures bear a strong resemblance to the DOF structures that act as their * conduit. 
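 *
 * As a small sketch of that flow (illustrative only, not code from the
 * sources), user-level might begin an enabling of "syscall:::entry" by
 * filling in a probe description before distilling it into DOF:
 *
 *	dtrace_probedesc_t pd = { .dtpd_id = DTRACE_IDNONE };
 *	(void) strlcpy(pd.dtpd_provider, "syscall", sizeof (pd.dtpd_provider));
 *	(void) strlcpy(pd.dtpd_name, "entry", sizeof (pd.dtpd_name));
 *	(empty dtpd_mod and dtpd_func fields match any module and function)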
*/ struct dtrace_predicate; typedef struct dtrace_probedesc { dtrace_id_t dtpd_id; /* probe identifier */ char dtpd_provider[DTRACE_PROVNAMELEN]; /* probe provider name */ char dtpd_mod[DTRACE_MODNAMELEN]; /* probe module name */ char dtpd_func[DTRACE_FUNCNAMELEN]; /* probe function name */ char dtpd_name[DTRACE_NAMELEN]; /* probe name */ } dtrace_probedesc_t; typedef struct dtrace_repldesc { dtrace_probedesc_t dtrpd_match; /* probe descr. to match */ dtrace_probedesc_t dtrpd_create; /* probe descr. to create */ } dtrace_repldesc_t; typedef struct dtrace_preddesc { dtrace_difo_t *dtpdd_difo; /* pointer to DIF object */ struct dtrace_predicate *dtpdd_predicate; /* pointer to predicate */ } dtrace_preddesc_t; typedef struct dtrace_actdesc { dtrace_difo_t *dtad_difo; /* pointer to DIF object */ struct dtrace_actdesc *dtad_next; /* next action */ dtrace_actkind_t dtad_kind; /* kind of action */ uint32_t dtad_ntuple; /* number in tuple */ uint64_t dtad_arg; /* action argument */ uint64_t dtad_uarg; /* user argument */ int dtad_refcnt; /* reference count */ } dtrace_actdesc_t; typedef struct dtrace_ecbdesc { dtrace_actdesc_t *dted_action; /* action description(s) */ dtrace_preddesc_t dted_pred; /* predicate description */ dtrace_probedesc_t dted_probe; /* probe description */ uint64_t dted_uarg; /* library argument */ int dted_refcnt; /* reference count */ } dtrace_ecbdesc_t; /* * DTrace Metadata Description Structures * * DTrace separates the trace data stream from the metadata stream. The only * metadata tokens placed in the data stream are the dtrace_rechdr_t (EPID + * timestamp) or (in the case of aggregations) aggregation identifiers. To * determine the structure of the data, DTrace consumers pass the token to the * kernel, and receive in return a corresponding description of the enabled * probe (via the dtrace_eprobedesc structure) or the aggregation (via the * dtrace_aggdesc structure). Both of these structures are expressed in terms * of record descriptions (via the dtrace_recdesc structure) that describe the * exact structure of the data. Some record descriptions may also contain a * format identifier; this additional bit of metadata can be retrieved from the * kernel, for which a format description is returned via the dtrace_fmtdesc * structure. Note that all four of these structures must be bitness-neutral * to allow for a 32-bit DTrace consumer on a 64-bit kernel. 
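 *
 * A sketch of the decoding this implies (assumptions: "buf" holds a buffer
 * snapshot, "offs" is the offset of a record within it, and "epd" is the
 * dtrace_eprobedesc_t previously fetched via DTRACEIOC_EPROBE for the
 * record's EPID):
 *
 *	dtrace_rechdr_t *rh = (dtrace_rechdr_t *)(buf + offs);
 *	for (i = 0; i < epd->dtepd_nrecs; i++) {
 *		dtrace_recdesc_t *rec = &epd->dtepd_rec[i];
 *		caddr_t data = buf + offs + rec->dtrd_offset;
 *		(interpret "data" per rec->dtrd_action and rec->dtrd_size)
 *	}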
*/ typedef struct dtrace_recdesc { dtrace_actkind_t dtrd_action; /* kind of action */ uint32_t dtrd_size; /* size of record */ uint32_t dtrd_offset; /* offset in ECB's data */ uint16_t dtrd_alignment; /* required alignment */ uint16_t dtrd_format; /* format, if any */ uint64_t dtrd_arg; /* action argument */ uint64_t dtrd_uarg; /* user argument */ } dtrace_recdesc_t; typedef struct dtrace_eprobedesc { dtrace_epid_t dtepd_epid; /* enabled probe ID */ dtrace_id_t dtepd_probeid; /* probe ID */ uint64_t dtepd_uarg; /* library argument */ uint32_t dtepd_size; /* total size */ int dtepd_nrecs; /* number of records */ dtrace_recdesc_t dtepd_rec[1]; /* records themselves */ } dtrace_eprobedesc_t; typedef struct dtrace_aggdesc { DTRACE_PTR(char, dtagd_name); /* not filled in by kernel */ dtrace_aggvarid_t dtagd_varid; /* not filled in by kernel */ int dtagd_flags; /* not filled in by kernel */ dtrace_aggid_t dtagd_id; /* aggregation ID */ dtrace_epid_t dtagd_epid; /* enabled probe ID */ uint32_t dtagd_size; /* size in bytes */ int dtagd_nrecs; /* number of records */ uint32_t dtagd_pad; /* explicit padding */ dtrace_recdesc_t dtagd_rec[1]; /* record descriptions */ } dtrace_aggdesc_t; typedef struct dtrace_fmtdesc { DTRACE_PTR(char, dtfd_string); /* format string */ int dtfd_length; /* length of format string */ uint16_t dtfd_format; /* format identifier */ } dtrace_fmtdesc_t; #define DTRACE_SIZEOF_EPROBEDESC(desc) \ (sizeof (dtrace_eprobedesc_t) + ((desc)->dtepd_nrecs ? \ (((desc)->dtepd_nrecs - 1) * sizeof (dtrace_recdesc_t)) : 0)) #define DTRACE_SIZEOF_AGGDESC(desc) \ (sizeof (dtrace_aggdesc_t) + ((desc)->dtagd_nrecs ? \ (((desc)->dtagd_nrecs - 1) * sizeof (dtrace_recdesc_t)) : 0)) /* * DTrace Option Interface * * Run-time DTrace options are set and retrieved via DOF_SECT_OPTDESC sections * in a DOF image. The dof_optdesc structure contains an option identifier and * an option value. The valid option identifiers are found below; the mapping * between option identifiers and option identifying strings is maintained at * user-level. Note that the value of DTRACEOPT_UNSET is such that all of the * following are potentially valid option values: all positive integers, zero * and negative one. Some options (notably "bufpolicy" and "bufresize") take * predefined tokens as their values; these are defined with * DTRACEOPT_{option}_{token}. 
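 *
 * For example (a sketch only), an enabling that requests a 4MB principal
 * buffer would include a DOF_SECT_OPTDESC entry of the form:
 *
 *	dof_optdesc_t opt = {
 *		.dofo_option = DTRACEOPT_BUFSIZE,
 *		.dofo_strtab = DOF_SECIDX_NONE,	(not a string-valued option)
 *		.dofo_value = 4 * 1024 * 1024
 *	};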
*/ #define DTRACEOPT_BUFSIZE 0 /* buffer size */ #define DTRACEOPT_BUFPOLICY 1 /* buffer policy */ #define DTRACEOPT_DYNVARSIZE 2 /* dynamic variable size */ #define DTRACEOPT_AGGSIZE 3 /* aggregation size */ #define DTRACEOPT_SPECSIZE 4 /* speculation size */ #define DTRACEOPT_NSPEC 5 /* number of speculations */ #define DTRACEOPT_STRSIZE 6 /* string size */ #define DTRACEOPT_CLEANRATE 7 /* dynvar cleaning rate */ #define DTRACEOPT_CPU 8 /* CPU to trace */ #define DTRACEOPT_BUFRESIZE 9 /* buffer resizing policy */ #define DTRACEOPT_GRABANON 10 /* grab anonymous state, if any */ #define DTRACEOPT_FLOWINDENT 11 /* indent function entry/return */ #define DTRACEOPT_QUIET 12 /* only output explicitly traced data */ #define DTRACEOPT_STACKFRAMES 13 /* number of stack frames */ #define DTRACEOPT_USTACKFRAMES 14 /* number of user stack frames */ #define DTRACEOPT_AGGRATE 15 /* aggregation snapshot rate */ #define DTRACEOPT_SWITCHRATE 16 /* buffer switching rate */ #define DTRACEOPT_STATUSRATE 17 /* status rate */ #define DTRACEOPT_DESTRUCTIVE 18 /* destructive actions allowed */ #define DTRACEOPT_STACKINDENT 19 /* output indent for stack traces */ #define DTRACEOPT_RAWBYTES 20 /* always print bytes in raw form */ #define DTRACEOPT_JSTACKFRAMES 21 /* number of jstack() frames */ #define DTRACEOPT_JSTACKSTRSIZE 22 /* size of jstack() string table */ #define DTRACEOPT_AGGSORTKEY 23 /* sort aggregations by key */ #define DTRACEOPT_AGGSORTREV 24 /* reverse-sort aggregations */ #define DTRACEOPT_AGGSORTPOS 25 /* agg. position to sort on */ #define DTRACEOPT_AGGSORTKEYPOS 26 /* agg. key position to sort on */ #define DTRACEOPT_TEMPORAL 27 /* temporally ordered output */ #define DTRACEOPT_AGGHIST 28 /* histogram aggregation output */ #define DTRACEOPT_AGGPACK 29 /* packed aggregation output */ #define DTRACEOPT_AGGZOOM 30 /* zoomed aggregation scaling */ #define DTRACEOPT_ZONE 31 /* zone in which to enable probes */ #define DTRACEOPT_MAX 32 /* number of options */ #define DTRACEOPT_UNSET (dtrace_optval_t)-2 /* unset option */ #define DTRACEOPT_BUFPOLICY_RING 0 /* ring buffer */ #define DTRACEOPT_BUFPOLICY_FILL 1 /* fill buffer, then stop */ #define DTRACEOPT_BUFPOLICY_SWITCH 2 /* switch buffers */ #define DTRACEOPT_BUFRESIZE_AUTO 0 /* automatic resizing */ #define DTRACEOPT_BUFRESIZE_MANUAL 1 /* manual resizing */ /* * DTrace Buffer Interface * * In order to get a snapshot of the principal or aggregation buffer, * user-level passes a buffer description to the kernel with the dtrace_bufdesc * structure. This describes which CPU user-level is interested in, and * where user-level wishes the kernel to snapshot the buffer to (the * dtbd_data field). The kernel uses the same structure to pass back some * information regarding the buffer: the size of data actually copied out, the * number of drops, the number of errors, the offset of the oldest record, * and the time of the snapshot. * * If the buffer policy is a "switch" policy, taking a snapshot of the * principal buffer has the additional effect of switching the active and * inactive buffers. Taking a snapshot of the aggregation buffer _always_ has * the additional effect of switching the active and inactive buffers. 
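 *
 * A consumer-side sketch of that handshake in the illumos ioctl style
 * ("fd", "cpu" and "buf" are assumptions; note that the FreeBSD
 * DTRACEIOC_BUFSNAP defined below takes a pointer to the descriptor
 * pointer instead):
 *
 *	dtrace_bufdesc_t desc = { .dtbd_cpu = cpu, .dtbd_size = size };
 *	desc.dtbd_data = buf;	(a "size"-byte buffer owned by the consumer)
 *	if (ioctl(fd, DTRACEIOC_BUFSNAP, &desc) == 0)
 *		(desc.dtbd_size now holds the bytes actually copied out)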
*/ typedef struct dtrace_bufdesc { uint64_t dtbd_size; /* size of buffer */ uint32_t dtbd_cpu; /* CPU or DTRACE_CPUALL */ uint32_t dtbd_errors; /* number of errors */ uint64_t dtbd_drops; /* number of drops */ DTRACE_PTR(char, dtbd_data); /* data */ uint64_t dtbd_oldest; /* offset of oldest record */ uint64_t dtbd_timestamp; /* hrtime of snapshot */ } dtrace_bufdesc_t; /* * Each record in the buffer (dtbd_data) begins with a header that includes * the epid and a timestamp. The timestamp is split into two 4-byte parts * so that we do not require 8-byte alignment. */ typedef struct dtrace_rechdr { dtrace_epid_t dtrh_epid; /* enabled probe id */ uint32_t dtrh_timestamp_hi; /* high bits of hrtime_t */ uint32_t dtrh_timestamp_lo; /* low bits of hrtime_t */ } dtrace_rechdr_t; #define DTRACE_RECORD_LOAD_TIMESTAMP(dtrh) \ ((dtrh)->dtrh_timestamp_lo + \ ((uint64_t)(dtrh)->dtrh_timestamp_hi << 32)) #define DTRACE_RECORD_STORE_TIMESTAMP(dtrh, hrtime) { \ (dtrh)->dtrh_timestamp_lo = (uint32_t)hrtime; \ (dtrh)->dtrh_timestamp_hi = hrtime >> 32; \ } /* * DTrace Status * * The status of DTrace is relayed via the dtrace_status structure. This * structure contains members to count drops other than the capacity drops * available via the buffer interface (see above). This consists of dynamic * drops (including capacity dynamic drops, rinsing drops and dirty drops), and * speculative drops (including capacity speculative drops, drops due to busy * speculative buffers and drops due to unavailable speculative buffers). * Additionally, the status structure contains a field to indicate the number * of "fill"-policy buffers that have been filled and a boolean field to * indicate that exit() has been called. If the dtst_exiting field is non-zero, * no further data will be generated until tracing is stopped (at which time * any enablings of the END action will be processed); if user-level sees that * this field is non-zero, tracing should be stopped as soon as possible. */ typedef struct dtrace_status { uint64_t dtst_dyndrops; /* dynamic drops */ uint64_t dtst_dyndrops_rinsing; /* dyn drops due to rinsing */ uint64_t dtst_dyndrops_dirty; /* dyn drops due to dirty */ uint64_t dtst_specdrops; /* speculative drops */ uint64_t dtst_specdrops_busy; /* spec drops due to busy */ uint64_t dtst_specdrops_unavail; /* spec drops due to unavail */ uint64_t dtst_errors; /* total errors */ uint64_t dtst_filled; /* number of filled bufs */ uint64_t dtst_stkstroverflows; /* stack string tab overflows */ uint64_t dtst_dblerrors; /* errors in ERROR probes */ char dtst_killed; /* non-zero if killed */ char dtst_exiting; /* non-zero if exit() called */ char dtst_pad[6]; /* pad out to 64-bit align */ } dtrace_status_t; /* * DTrace Configuration * * User-level may need to understand some elements of the kernel DTrace * configuration in order to generate correct DIF. This information is * conveyed via the dtrace_conf structure. */ typedef struct dtrace_conf { uint_t dtc_difversion; /* supported DIF version */ uint_t dtc_difintregs; /* # of DIF integer registers */ uint_t dtc_diftupregs; /* # of DIF tuple registers */ uint_t dtc_ctfmodel; /* CTF data model */ uint_t dtc_pad[8]; /* reserved for future use */ } dtrace_conf_t; /* * DTrace Faults * * The constants below DTRACEFLT_LIBRARY indicate probe processing faults; * constants at or above DTRACEFLT_LIBRARY indicate faults in probe * postprocessing at user-level. Probe processing faults induce an ERROR * probe and are replicated in unistd.d to allow users' ERROR probes to decode * the error condition using these symbolic labels. */ #define DTRACEFLT_UNKNOWN 0 /* Unknown fault */ #define DTRACEFLT_BADADDR 1 /* Bad address */ #define DTRACEFLT_BADALIGN 2 /* Bad alignment */ #define DTRACEFLT_ILLOP 3 /* Illegal operation */ #define DTRACEFLT_DIVZERO 4 /* Divide-by-zero */ #define DTRACEFLT_NOSCRATCH 5 /* Out of scratch space */ #define DTRACEFLT_KPRIV 6 /* Illegal kernel access */ #define DTRACEFLT_UPRIV 7 /* Illegal user access */ #define DTRACEFLT_TUPOFLOW 8 /* Tuple stack overflow */ #define DTRACEFLT_BADSTACK 9 /* Bad stack */ #define DTRACEFLT_LIBRARY 1000 /* Library-level fault */ /* * DTrace Argument Types * * Because it would waste both space and time, argument types do not reside * with the probe. In order to determine argument types for args[X] * variables, the D compiler queries for argument types on a probe-by-probe * basis. (This optimizes for the common case that arguments are either not * used or used in an untyped fashion.) Typed arguments are specified with a * string of the type name in the dtargd_native member of the argument * description structure. Typed arguments may be further translated to types * of greater stability; the provider indicates such a translated argument by * filling in the dtargd_xlate member with the string of the translated type. * Finally, the provider may indicate which argument value a given argument * maps to by setting the dtargd_mapping member -- allowing a single argument * to map to multiple args[X] variables. */ typedef struct dtrace_argdesc { dtrace_id_t dtargd_id; /* probe identifier */ int dtargd_ndx; /* arg number (-1 iff none) */ int dtargd_mapping; /* value mapping */ char dtargd_native[DTRACE_ARGTYPELEN]; /* native type name */ char dtargd_xlate[DTRACE_ARGTYPELEN]; /* translated type name */ } dtrace_argdesc_t; /* * DTrace Stability Attributes * * Each DTrace provider advertises the name and data stability of each of its * probe description components, as well as its architectural dependencies. * The D compiler can query the provider attributes (dtrace_pattr_t below) in * order to compute the properties of an input program and report them.
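 *
 * Sketch (a hypothetical provider, not taken from the sources): a provider
 * whose probe names are Evolving and whose arguments are ISA-dependent might
 * advertise, in the provider/module/function/name/args order defined below:
 *
 *	static dtrace_pattr_t my_pattr = {
 *	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
 *	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
 *	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
 *	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
 *	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
 *	};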
*/ typedef uint8_t dtrace_stability_t; /* stability code (see attributes(5)) */ typedef uint8_t dtrace_class_t; /* architectural dependency class */ #define DTRACE_STABILITY_INTERNAL 0 /* private to DTrace itself */ #define DTRACE_STABILITY_PRIVATE 1 /* private to Sun (see docs) */ #define DTRACE_STABILITY_OBSOLETE 2 /* scheduled for removal */ #define DTRACE_STABILITY_EXTERNAL 3 /* not controlled by Sun */ #define DTRACE_STABILITY_UNSTABLE 4 /* new or rapidly changing */ #define DTRACE_STABILITY_EVOLVING 5 /* less rapidly changing */ #define DTRACE_STABILITY_STABLE 6 /* mature interface from Sun */ #define DTRACE_STABILITY_STANDARD 7 /* industry standard */ #define DTRACE_STABILITY_MAX 7 /* maximum valid stability */ #define DTRACE_CLASS_UNKNOWN 0 /* unknown architectural dependency */ #define DTRACE_CLASS_CPU 1 /* CPU-module-specific */ #define DTRACE_CLASS_PLATFORM 2 /* platform-specific (uname -i) */ #define DTRACE_CLASS_GROUP 3 /* hardware-group-specific (uname -m) */ #define DTRACE_CLASS_ISA 4 /* ISA-specific (uname -p) */ #define DTRACE_CLASS_COMMON 5 /* common to all systems */ #define DTRACE_CLASS_MAX 5 /* maximum valid class */ #define DTRACE_PRIV_NONE 0x0000 #define DTRACE_PRIV_KERNEL 0x0001 #define DTRACE_PRIV_USER 0x0002 #define DTRACE_PRIV_PROC 0x0004 #define DTRACE_PRIV_OWNER 0x0008 #define DTRACE_PRIV_ZONEOWNER 0x0010 #define DTRACE_PRIV_ALL \ (DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER | \ DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER) typedef struct dtrace_ppriv { uint32_t dtpp_flags; /* privilege flags */ uid_t dtpp_uid; /* user ID */ zoneid_t dtpp_zoneid; /* zone ID */ } dtrace_ppriv_t; typedef struct dtrace_attribute { dtrace_stability_t dtat_name; /* entity name stability */ dtrace_stability_t dtat_data; /* entity data stability */ dtrace_class_t dtat_class; /* entity data dependency */ } dtrace_attribute_t; typedef struct dtrace_pattr { dtrace_attribute_t dtpa_provider; /* provider attributes */ dtrace_attribute_t dtpa_mod; /* module attributes */ dtrace_attribute_t dtpa_func; /* function attributes */ dtrace_attribute_t dtpa_name; /* name attributes */ dtrace_attribute_t dtpa_args; /* args[] attributes */ } dtrace_pattr_t; typedef struct dtrace_providerdesc { char dtvd_name[DTRACE_PROVNAMELEN]; /* provider name */ dtrace_pattr_t dtvd_attr; /* stability attributes */ dtrace_ppriv_t dtvd_priv; /* privileges required */ } dtrace_providerdesc_t; /* * DTrace Pseudodevice Interface * * DTrace is controlled through ioctl(2)'s to the in-kernel dtrace:dtrace * pseudodevice driver. These ioctls comprise the user-kernel interface to * DTrace. */ #ifdef illumos #define DTRACEIOC (('d' << 24) | ('t' << 16) | ('r' << 8)) #define DTRACEIOC_PROVIDER (DTRACEIOC | 1) /* provider query */ #define DTRACEIOC_PROBES (DTRACEIOC | 2) /* probe query */ #define DTRACEIOC_BUFSNAP (DTRACEIOC | 4) /* snapshot buffer */ #define DTRACEIOC_PROBEMATCH (DTRACEIOC | 5) /* match probes */ #define DTRACEIOC_ENABLE (DTRACEIOC | 6) /* enable probes */ #define DTRACEIOC_AGGSNAP (DTRACEIOC | 7) /* snapshot agg. */ #define DTRACEIOC_EPROBE (DTRACEIOC | 8) /* get eprobe desc. */ #define DTRACEIOC_PROBEARG (DTRACEIOC | 9) /* get probe arg */ #define DTRACEIOC_CONF (DTRACEIOC | 10) /* get config. */ #define DTRACEIOC_STATUS (DTRACEIOC | 11) /* get status */ #define DTRACEIOC_GO (DTRACEIOC | 12) /* start tracing */ #define DTRACEIOC_STOP (DTRACEIOC | 13) /* stop tracing */ #define DTRACEIOC_AGGDESC (DTRACEIOC | 15) /* get agg. desc. 
*/ #define DTRACEIOC_FORMAT (DTRACEIOC | 16) /* get format str */ #define DTRACEIOC_DOFGET (DTRACEIOC | 17) /* get DOF */ #define DTRACEIOC_REPLICATE (DTRACEIOC | 18) /* replicate enab */ #else #define DTRACEIOC_PROVIDER _IOWR('x',1,dtrace_providerdesc_t) /* provider query */ #define DTRACEIOC_PROBES _IOWR('x',2,dtrace_probedesc_t) /* probe query */ #define DTRACEIOC_BUFSNAP _IOW('x',4,dtrace_bufdesc_t *) /* snapshot buffer */ #define DTRACEIOC_PROBEMATCH _IOWR('x',5,dtrace_probedesc_t) /* match probes */ typedef struct { void *dof; /* DOF userland address written to driver. */ int n_matched; /* # matches returned by driver. */ } dtrace_enable_io_t; #define DTRACEIOC_ENABLE _IOWR('x',6,dtrace_enable_io_t) /* enable probes */ #define DTRACEIOC_AGGSNAP _IOW('x',7,dtrace_bufdesc_t *) /* snapshot agg. */ #define DTRACEIOC_EPROBE _IOW('x',8,dtrace_eprobedesc_t) /* get eprobe desc. */ #define DTRACEIOC_PROBEARG _IOWR('x',9,dtrace_argdesc_t) /* get probe arg */ #define DTRACEIOC_CONF _IOR('x',10,dtrace_conf_t) /* get config. */ #define DTRACEIOC_STATUS _IOR('x',11,dtrace_status_t) /* get status */ #define DTRACEIOC_GO _IOR('x',12,processorid_t) /* start tracing */ #define DTRACEIOC_STOP _IOWR('x',13,processorid_t) /* stop tracing */ #define DTRACEIOC_AGGDESC _IOW('x',15,dtrace_aggdesc_t *) /* get agg. desc. */ #define DTRACEIOC_FORMAT _IOWR('x',16,dtrace_fmtdesc_t) /* get format str */ #define DTRACEIOC_DOFGET _IOW('x',17,dof_hdr_t *) /* get DOF */ #define DTRACEIOC_REPLICATE _IOW('x',18,dtrace_repldesc_t) /* replicate enab */ #endif /* * DTrace Helpers * * In general, DTrace establishes probes in processes and takes actions on * processes without knowing their specific user-level structures. Instead of * existing in the framework, process-specific knowledge is contained by the * enabling D program -- which can apply process-specific knowledge by making * appropriate use of DTrace primitives like copyin() and copyinstr() to * operate on user-level data. However, there may exist some specific probes * of particular semantic relevance that the application developer may wish to * explicitly export. For example, an application may wish to export a probe * at the point that it begins and ends certain well-defined transactions. In * addition to providing probes, programs may wish to offer assistance for * certain actions. For example, in highly dynamic environments (e.g., Java), * it may be difficult to obtain a stack trace in terms of meaningful symbol * names (the translation from instruction addresses to corresponding symbol * names may only be possible in situ); these environments may wish to define * a series of actions to be applied in situ to obtain a meaningful stack * trace. * * These two mechanisms -- user-level statically defined tracing and assisting * DTrace actions -- are provided via DTrace _helpers_. Helpers are specified * via DOF, but unlike enabling DOF, helper DOF may contain definitions of * providers, probes and their arguments. If a helper wishes to provide * action assistance, probe descriptions and corresponding DIF actions may be * specified in the helper DOF. For such helper actions, however, the probe * description describes the specific helper: all DTrace helpers have the * provider name "dtrace" and the module name "helper", and the name of the * helper is contained in the function name (for example, the ustack() helper * is named "ustack"). 
Any helper-specific name may be contained in the name * (for example, if a helper were to have a constructor, it might be named * "dtrace:helper::init"). Helper actions are only called when the * action that they are helping is taken. Helper actions may only return DIF * expressions, and may only call the following subroutines: * * alloca() <= Allocates memory out of the consumer's scratch space * bcopy() <= Copies memory to scratch space * copyin() <= Copies memory from user-level into consumer's scratch * copyinto() <= Copies memory into a specific location in scratch * copyinstr() <= Copies a string into a specific location in scratch * * Helper actions may only access the following built-in variables: * * curthread <= Current kthread_t pointer * tid <= Current thread identifier * pid <= Current process identifier * ppid <= Parent process identifier * uid <= Current user ID * gid <= Current group ID * execname <= Current executable name * zonename <= Current zone name * * Helper actions may not manipulate or allocate dynamic variables, but they * may have clause-local and statically-allocated global variables. The * helper action variable state is specific to the helper action -- variables * used by the helper action may not be accessed outside of the helper * action, and the helper action may not access variables that live outside * of it. Helper actions may not load from kernel memory at-large; they are * restricted to loading current user state (via copyin() and variants) and * scratch space. As with probe enablings, helper actions are executed in * program order. The result of the helper action is the result of the last * executing helper expression. * * Helpers -- composed of either providers/probes or probes/actions (or both) * -- are added by opening the "helper" minor node, and issuing an ioctl(2) * (DTRACEHIOC_ADDDOF) that specifies the dof_helper_t structure. This * encapsulates the name and base address of the user-level library or * executable publishing the helpers and probes as well as the DOF that * contains the definitions of those helpers and probes. * * The DTRACEHIOC_ADD and DTRACEHIOC_REMOVE are left in place for legacy * helpers and should no longer be used. No other ioctls are valid on the * helper minor node. */ #ifdef illumos #define DTRACEHIOC (('d' << 24) | ('t' << 16) | ('h' << 8)) #define DTRACEHIOC_ADD (DTRACEHIOC | 1) /* add helper */ #define DTRACEHIOC_REMOVE (DTRACEHIOC | 2) /* remove helper */ #define DTRACEHIOC_ADDDOF (DTRACEHIOC | 3) /* add helper DOF */ #else -#define DTRACEHIOC_ADD _IOWR('z', 1, dof_hdr_t)/* add helper */ #define DTRACEHIOC_REMOVE _IOW('z', 2, int) /* remove helper */ #define DTRACEHIOC_ADDDOF _IOWR('z', 3, dof_helper_t)/* add helper DOF */ #endif typedef struct dof_helper { char dofhp_mod[DTRACE_MODNAMELEN]; /* executable or library name */ uint64_t dofhp_addr; /* base address of object */ uint64_t dofhp_dof; /* address of helper DOF */ #ifdef __FreeBSD__ pid_t dofhp_pid; /* target process ID */ int dofhp_gen; #endif } dof_helper_t; #define DTRACEMNR_DTRACE "dtrace" /* node for DTrace ops */ #define DTRACEMNR_HELPER "helper" /* node for helpers */ #define DTRACEMNRN_DTRACE 0 /* minor for DTrace ops */ #define DTRACEMNRN_HELPER 1 /* minor for helpers */ #define DTRACEMNRN_CLONE 2 /* first clone minor */ #ifdef _KERNEL /* * DTrace Provider API * * The following functions are implemented by the DTrace framework and are * used to implement separate in-kernel DTrace providers. Common functions * are provided in uts/common/os/dtrace.c. 
ISA-dependent subroutines are * defined in uts/<isa>/dtrace/dtrace_asm.s or uts/<isa>/dtrace/dtrace_isa.c. * * The provider API has two halves: the API that the providers consume from * DTrace, and the API that providers make available to DTrace. * * 1 Framework-to-Provider API * * 1.1 Overview * * The Framework-to-Provider API is represented by the dtrace_pops structure * that the provider passes to the framework when registering itself. This * structure consists of the following members: * * dtps_provide() <-- Provide all probes, all modules * dtps_provide_module() <-- Provide all probes in specified module * dtps_enable() <-- Enable specified probe * dtps_disable() <-- Disable specified probe * dtps_suspend() <-- Suspend specified probe * dtps_resume() <-- Resume specified probe * dtps_getargdesc() <-- Get the argument description for args[X] * dtps_getargval() <-- Get the value for an argX or args[X] variable * dtps_usermode() <-- Find out if the probe was fired in user mode * dtps_destroy() <-- Destroy all state associated with this probe * * 1.2 void dtps_provide(void *arg, const dtrace_probedesc_t *spec) * * 1.2.1 Overview * * Called to indicate that the provider should provide all probes. If the * specified description is non-NULL, dtps_provide() is being called because * no probe matched a specified probe -- if the provider has the ability to * create custom probes, it may wish to create a probe that matches the * specified description. * * 1.2.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is a pointer to a probe description that the provider may * wish to consider when creating custom probes. The provider is expected to * call back into the DTrace framework via dtrace_probe_create() to create * any necessary probes. dtps_provide() may be called even if the provider * has made available all probes; the provider should check the return value * of dtrace_probe_create() to handle this case. Note that the provider need * not implement both dtps_provide() and dtps_provide_module(); see * "Arguments and Notes" for dtrace_register(), below. * * 1.2.3 Return value * * None. * * 1.2.4 Caller's context * * dtps_provide() is typically called from open() or ioctl() context, but may * be called from other contexts as well. The DTrace framework is locked in * such a way that providers may not register or unregister. This means that * the provider may not call any DTrace API that affects its registration with * the framework, including dtrace_register(), dtrace_unregister(), * dtrace_invalidate(), and dtrace_condense(). However, the context is such * that the provider may (and indeed, is expected to) call probe-related * DTrace routines, including dtrace_probe_create(), dtrace_probe_lookup(), * and dtrace_probe_arg(). * * 1.3 void dtps_provide_module(void *arg, modctl_t *mp) * * 1.3.1 Overview * * Called to indicate that the provider should provide all probes in the * specified module. * * 1.3.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is a pointer to a modctl structure that indicates the * module for which probes should be created. * * 1.3.3 Return value * * None. * * 1.3.4 Caller's context * * dtps_provide_module() may be called from open() or ioctl() context, but * may also be called from a module loading context. mod_lock is held, and * the DTrace framework is locked in such a way that providers may not * register or unregister. 
This means that the provider may not call any * DTrace API that affects its registration with the framework, including * dtrace_register(), dtrace_unregister(), dtrace_invalidate(), and * dtrace_condense(). However, the context is such that the provider may (and * indeed, is expected to) call probe-related DTrace routines, including * dtrace_probe_create(), dtrace_probe_lookup(), and dtrace_probe_arg(). Note * that the provider need not implement both dtps_provide() and * dtps_provide_module(); see "Arguments and Notes" for dtrace_register(), * below. * * 1.4 void dtps_enable(void *arg, dtrace_id_t id, void *parg) * * 1.4.1 Overview * * Called to enable the specified probe. * * 1.4.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the probe to be enabled. The third * argument is the probe argument as passed to dtrace_probe_create(). * dtps_enable() will be called when a probe transitions from not being * enabled at all to having one or more ECBs. The number of ECBs associated * with the probe may change without subsequent calls into the provider. * When the number of ECBs drops to zero, the provider will be explicitly * told to disable the probe via dtps_disable(). dtrace_probe() should never * be called for a probe identifier that hasn't been explicitly enabled via * dtps_enable(). * * 1.4.3 Return value * * None. * * 1.4.4 Caller's context * * The DTrace framework is locked in such a way that it may not be called * back into at all. cpu_lock is held. mod_lock is not held and may not * be acquired. * * 1.5 void dtps_disable(void *arg, dtrace_id_t id, void *parg) * * 1.5.1 Overview * * Called to disable the specified probe. * * 1.5.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the probe to be disabled. The third * argument is the probe argument as passed to dtrace_probe_create(). * dtps_disable() will be called when a probe transitions from being enabled * to having zero ECBs. dtrace_probe() should never be called for a probe * identifier that has been explicitly disabled via dtps_disable(). * * 1.5.3 Return value * * None. * * 1.5.4 Caller's context * * The DTrace framework is locked in such a way that it may not be called * back into at all. cpu_lock is held. mod_lock is not held and may not * be acquired. * * 1.6 void dtps_suspend(void *arg, dtrace_id_t id, void *parg) * * 1.6.1 Overview * * Called to suspend the specified enabled probe. This entry point is for * providers that may need to suspend some or all of their probes when CPUs * are being powered on or when the boot monitor is being entered for a * prolonged period of time. * * 1.6.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the probe to be suspended. The * third argument is the probe argument as passed to dtrace_probe_create(). * dtps_suspend will only be called on an enabled probe. Providers that * provide a dtps_suspend entry point will want to take roughly the action * that they take for dtps_disable(). * * 1.6.3 Return value * * None. * * 1.6.4 Caller's context * * Interrupts are disabled. The DTrace framework is in a state such that the * specified probe cannot be disabled or destroyed for the duration of * dtps_suspend(). As interrupts are disabled, the provider is afforded * little latitude; the provider is expected to do no more than a store to * memory. 
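 *
 * As a minimal sketch (an illustration, not a definitive implementation),
 * a hypothetical provider's enable/disable pair might arm and disarm a
 * per-probe flag that its instrumentation site tests before calling
 * dtrace_probe(); struct hypo_probe and hp_armed are assumed names:
 *
 *	static void
 *	hypo_enable(void *arg, dtrace_id_t id, void *parg)
 *	{
 *		struct hypo_probe *hp = parg;
 *
 *		hp->hp_armed = 1;
 *	}
 *
 *	static void
 *	hypo_disable(void *arg, dtrace_id_t id, void *parg)
 *	{
 *		struct hypo_probe *hp = parg;
 *
 *		hp->hp_armed = 0;
 *	}
 *
 * Real providers typically patch their instrumentation points rather than
 * testing a flag, but the contract is the same: once dtps_disable() has
 * been called, the site must no longer call dtrace_probe() for the probe.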
* * 1.7 void dtps_resume(void *arg, dtrace_id_t id, void *parg) * * 1.7.1 Overview * * Called to resume the specified enabled probe. This entry point is for * providers that may need to resume some or all of their probes after the * completion of an event that induced a call to dtps_suspend(). * * 1.7.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the probe to be resumed. The * third argument is the probe argument as passed to dtrace_probe_create(). * dtps_resume will only be called on an enabled probe. Providers that * provide a dtps_resume entry point will want to take roughly the action * that they take for dtps_enable(). * * 1.7.3 Return value * * None. * * 1.7.4 Caller's context * * Interrupts are disabled. The DTrace framework is in a state such that the * specified probe cannot be disabled or destroyed for the duration of * dtps_resume(). As interrupts are disabled, the provider is afforded * little latitude; the provider is expected to do no more than a store to * memory. * * 1.8 void dtps_getargdesc(void *arg, dtrace_id_t id, void *parg, * dtrace_argdesc_t *desc) * * 1.8.1 Overview * * Called to retrieve the argument description for an args[X] variable. * * 1.8.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the current probe. The third * argument is the probe argument as passed to dtrace_probe_create(). The * fourth argument is a pointer to the argument description. This * description is both an input and output parameter: it contains the * index of the desired argument in the dtargd_ndx field, and expects * the other fields to be filled in upon return. If there is no argument * corresponding to the specified index, the dtargd_ndx field should be set * to DTRACE_ARGNONE. * * 1.8.3 Return value * * None. The dtargd_ndx, dtargd_native, dtargd_xlate and dtargd_mapping * members of the dtrace_argdesc_t structure are all output values. * * 1.8.4 Caller's context * * dtps_getargdesc() is called from ioctl() context. mod_lock is held, and * the DTrace framework is locked in such a way that providers may not * register or unregister. This means that the provider may not call any * DTrace API that affects its registration with the framework, including * dtrace_register(), dtrace_unregister(), dtrace_invalidate(), and * dtrace_condense(). * * 1.9 uint64_t dtps_getargval(void *arg, dtrace_id_t id, void *parg, * int argno, int aframes) * * 1.9.1 Overview * * Called to retrieve a value for an argX or args[X] variable. * * 1.9.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the current probe. The third * argument is the probe argument as passed to dtrace_probe_create(). The * fourth argument is the number of the argument (the X in the example in * 1.9.1). The fifth argument is the number of stack frames that were used * to get from the actual place in the code that fired the probe to * dtrace_probe() itself, the so-called artificial frames. This argument may * be used to descend an appropriate number of frames to find the correct * values. If this entry point is left NULL, the dtrace_getarg() built-in * function is used. * * 1.9.3 Return value * * The value of the argument. * * 1.9.4 Caller's context * * This is called from within dtrace_probe() meaning that interrupts * are disabled. No locks should be taken within this entry point. 
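 *
 * As a hedged sketch of the dtps_getargdesc() contract in 1.8, a
 * hypothetical provider whose probes carry a single "int" argument might
 * fill in the description as follows:
 *
 *	static void
 *	hypo_getargdesc(void *arg, dtrace_id_t id, void *parg,
 *	    dtrace_argdesc_t *desc)
 *	{
 *		if (desc->dtargd_ndx == 0) {
 *			desc->dtargd_mapping = desc->dtargd_ndx;
 *			(void) strlcpy(desc->dtargd_native, "int",
 *			    sizeof (desc->dtargd_native));
 *		} else {
 *			desc->dtargd_ndx = DTRACE_ARGNONE;
 *		}
 *	}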
* * 1.10 int dtps_usermode(void *arg, dtrace_id_t id, void *parg) * * 1.10.1 Overview * * Called to determine if the probe was fired in a user context. * * 1.10.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the current probe. The third * argument is the probe argument as passed to dtrace_probe_create(). This * entry point must not be left NULL for providers whose probes allow for * mixed mode tracing, that is to say those probes that can fire during * kernel- _or_ user-mode execution. * * 1.10.3 Return value * * A bitwise OR that encapsulates both the mode (either DTRACE_MODE_KERNEL * or DTRACE_MODE_USER) and the policy when the privilege of the enabling * is insufficient for that mode (a combination of DTRACE_MODE_NOPRIV_DROP, * DTRACE_MODE_NOPRIV_RESTRICT, and DTRACE_MODE_LIMITEDPRIV_RESTRICT). If * the DTRACE_MODE_NOPRIV_DROP bit is set, insufficient privilege will result * in the probe firing being silently ignored for the enabling; if the * DTRACE_MODE_NOPRIV_RESTRICT bit is set, insufficient privilege will not * prevent probe processing for the enabling, but restrictions will be in * place that induce a UPRIV fault upon attempt to examine probe arguments * or current process state. If the DTRACE_MODE_LIMITEDPRIV_RESTRICT bit * is set, similar restrictions will be placed upon operation if the * privilege is sufficient to process the enabling, but does not otherwise * entitle the enabling to all zones. The DTRACE_MODE_NOPRIV_DROP and * DTRACE_MODE_NOPRIV_RESTRICT are mutually exclusive (and one of these * two policies must be specified), but either may be combined (or not) * with DTRACE_MODE_LIMITEDPRIV_RESTRICT. * * 1.10.4 Caller's context * * This is called from within dtrace_probe() meaning that interrupts * are disabled. No locks should be taken within this entry point. * * 1.11 void dtps_destroy(void *arg, dtrace_id_t id, void *parg) * * 1.11.1 Overview * * Called to destroy the specified probe. * * 1.11.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_register(). The * second argument is the identifier of the probe to be destroyed. The third * argument is the probe argument as passed to dtrace_probe_create(). The * provider should free all state associated with the probe. The framework * guarantees that dtps_destroy() is only called for probes that have either * been disabled via dtps_disable() or were never enabled via dtps_enable(). * Once dtps_destroy() has been called for a probe, no further call will be * made specifying the probe. * * 1.11.3 Return value * * None. * * 1.11.4 Caller's context * * The DTrace framework is locked in such a way that it may not be called * back into at all. mod_lock is held. cpu_lock is not held, and may not be * acquired. * * * 2 Provider-to-Framework API * * 2.1 Overview * * The Provider-to-Framework API provides the mechanism for the provider to * register itself with the DTrace framework, to create probes, to lookup * probes and (most importantly) to fire probes. 
The Provider-to-Framework * consists of: * * dtrace_register() <-- Register a provider with the DTrace framework * dtrace_unregister() <-- Remove a provider's DTrace registration * dtrace_invalidate() <-- Invalidate the specified provider * dtrace_condense() <-- Remove a provider's unenabled probes * dtrace_attached() <-- Indicates whether or not DTrace has attached * dtrace_probe_create() <-- Create a DTrace probe * dtrace_probe_lookup() <-- Lookup a DTrace probe based on its name * dtrace_probe_arg() <-- Return the probe argument for a specific probe * dtrace_probe() <-- Fire the specified probe * * 2.2 int dtrace_register(const char *name, const dtrace_pattr_t *pap, * uint32_t priv, cred_t *cr, const dtrace_pops_t *pops, void *arg, * dtrace_provider_id_t *idp) * * 2.2.1 Overview * * dtrace_register() registers the calling provider with the DTrace * framework. It should generally be called by DTrace providers in their * attach(9E) entry point. * * 2.2.2 Arguments and Notes * * The first argument is the name of the provider. The second argument is a * pointer to the stability attributes for the provider. The third argument * is the privilege flags for the provider, and must be some combination of: * * DTRACE_PRIV_NONE <= All users may enable probes from this provider * * DTRACE_PRIV_PROC <= Any user with privilege of PRIV_DTRACE_PROC may * enable probes from this provider * * DTRACE_PRIV_USER <= Any user with privilege of PRIV_DTRACE_USER may * enable probes from this provider * * DTRACE_PRIV_KERNEL <= Any user with privilege of PRIV_DTRACE_KERNEL * may enable probes from this provider * * DTRACE_PRIV_OWNER <= This flag places an additional constraint on * the privilege requirements above. These probes * require either (a) a user ID matching the user * ID of the cred passed in the fourth argument * or (b) the PRIV_PROC_OWNER privilege. * * DTRACE_PRIV_ZONEOWNER<= This flag places an additional constraint on * the privilege requirements above. These probes * require either (a) a zone ID matching the zone * ID of the cred passed in the fourth argument * or (b) the PRIV_PROC_ZONE privilege. * * Note that these flags designate the _visibility_ of the probes, not * the conditions under which they may or may not fire. * * The fourth argument is the credential that is associated with the * provider. This argument should be NULL if the privilege flags don't * include DTRACE_PRIV_OWNER or DTRACE_PRIV_ZONEOWNER. If non-NULL, the * framework stashes the uid and zoneid represented by this credential * for use at probe-time, in implicit predicates. These limit visibility * of the probes to users and/or zones which have sufficient privilege to * access them. * * The fifth argument is a DTrace provider operations vector, which provides * the implementation for the Framework-to-Provider API. (See Section 1, * above.) This must be non-NULL, and each member must be non-NULL. The * exceptions to this are (1) the dtps_provide() and dtps_provide_module() * members (if the provider so desires, _one_ of these members may be left * NULL -- denoting that the provider only implements the other) and (2) * the dtps_suspend() and dtps_resume() members, which must either both be * NULL or both be non-NULL. * * The sixth argument is a cookie to be specified as the first argument for * each function in the Framework-to-Provider API. This argument may have * any value. * * The final argument is a pointer to dtrace_provider_id_t. 
If * dtrace_register() successfully completes, the provider identifier will be * stored in the memory pointed to by this argument. This argument must be * non-NULL. * * 2.2.3 Return value * * On success, dtrace_register() returns 0 and stores the new provider's * identifier into the memory pointed to by the idp argument. On failure, * dtrace_register() returns an errno: * * EINVAL The arguments passed to dtrace_register() were somehow invalid. * This may be because a parameter that must be non-NULL was NULL, * because the name was invalid (either empty or an illegal * provider name) or because the attributes were invalid. * * No other failure code is returned. * * 2.2.4 Caller's context * * dtrace_register() may induce calls to dtrace_provide(); the provider must * hold no locks across dtrace_register() that may also be acquired by * dtrace_provide(). cpu_lock and mod_lock must not be held. * * 2.3 int dtrace_unregister(dtrace_provider_id_t id) * * 2.3.1 Overview * * Unregisters the specified provider from the DTrace framework. It should * generally be called by DTrace providers in their detach(9E) entry point. * * 2.3.2 Arguments and Notes * * The only argument is the provider identifier, as returned from a * successful call to dtrace_register(). As a result of calling * dtrace_unregister(), the DTrace framework will call back into the provider * via the dtps_destroy() entry point. Once dtrace_unregister() successfully * completes, however, the DTrace framework will no longer make calls through * the Framework-to-Provider API. * * 2.3.3 Return value * * On success, dtrace_unregister returns 0. On failure, dtrace_unregister() * returns an errno: * * EBUSY There are currently processes that have the DTrace pseudodevice * open, or there exists an anonymous enabling that hasn't yet * been claimed. * * No other failure code is returned. * * 2.3.4 Caller's context * * Because a call to dtrace_unregister() may induce calls through the * Framework-to-Provider API, the caller may not hold any lock across * dtrace_unregister() that is also acquired in any of the Framework-to- * Provider API functions. Additionally, mod_lock may not be held. * * 2.4 void dtrace_invalidate(dtrace_provider_id_t id) * * 2.4.1 Overview * * Invalidates the specified provider. All subsequent probe lookups for the * specified provider will fail, but its probes will not be removed. * * 2.4.2 Arguments and notes * * The only argument is the provider identifier, as returned from a * successful call to dtrace_register(). In general, a provider's probes * always remain valid; dtrace_invalidate() is a mechanism for invalidating * an entire provider, regardless of whether or not its probes are enabled. * Note that dtrace_invalidate() will _not_ prevent already enabled * probes from firing -- it will merely prevent any new enablings of the * provider's probes. * * 2.5 int dtrace_condense(dtrace_provider_id_t id) * * 2.5.1 Overview * * Removes all the unenabled probes for the given provider. This function is * not unlike dtrace_unregister(), except that it doesn't remove the * provider, just as many of its associated probes as it can. * * 2.5.2 Arguments and Notes * * As with dtrace_unregister(), the sole argument is the provider identifier * as returned from a successful call to dtrace_register(). As a result of * calling dtrace_condense(), the DTrace framework will call back into the * given provider's dtps_destroy() entry point for each of the provider's * unenabled probes. 
* * 2.5.3 Return value * * Currently, dtrace_condense() always returns 0. However, consumers of this * function should check the return value as appropriate; its behavior may * change in the future. * * 2.5.4 Caller's context * * As with dtrace_unregister(), the caller may not hold any lock across * dtrace_condense() that is also acquired in the provider's entry points. * Also, mod_lock may not be held. * * 2.6 int dtrace_attached() * * 2.6.1 Overview * * Indicates whether or not DTrace has attached. * * 2.6.2 Arguments and Notes * * For most providers, DTrace makes initial contact beyond registration. * That is, once a provider has registered with DTrace, it waits to hear * from DTrace to create probes. However, some providers may wish to * proactively create probes without first being told by DTrace to do so. * If providers wish to do this, they must first call dtrace_attached() to * determine if DTrace itself has attached. If dtrace_attached() returns 0, * the provider must not make any other Provider-to-Framework API call. * * 2.6.3 Return value * * dtrace_attached() returns 1 if DTrace has attached, 0 otherwise. * * 2.7 dtrace_id_t dtrace_probe_create(dtrace_provider_id_t id, const char *mod, * const char *func, const char *name, int aframes, void *arg) * * 2.7.1 Overview * * Creates a probe with specified module name, function name, and name. * * 2.7.2 Arguments and Notes * * The first argument is the provider identifier, as returned from a * successful call to dtrace_register(). The second, third, and fourth * arguments are the module name, function name, and probe name, * respectively. Of these, module name and function name may both be NULL * (in which case the probe is considered to be unanchored), or they may both * be non-NULL. The name must be non-NULL, and must point to a non-empty * string. * * The fifth argument is the number of artificial stack frames that will be * found on the stack when dtrace_probe() is called for the new probe. These * artificial frames will automatically be pruned should the stack() or * stackdepth() functions be called as part of one of the probe's ECBs. If * the probe doesn't add an artificial frame, this parameter should be * zero. * * The final argument is a probe argument that will be passed back to the * provider when a probe-specific operation is called. (e.g., via * dtps_enable(), dtps_disable(), etc.) * * Note that it is up to the provider to be sure that the probe that it * creates does not already exist -- if the provider is unsure of the probe's * existence, it should assure its absence with dtrace_probe_lookup() before * calling dtrace_probe_create(). * * 2.7.3 Return value * * dtrace_probe_create() always succeeds, and always returns the identifier * of the newly-created probe. * * 2.7.4 Caller's context * * While dtrace_probe_create() is generally expected to be called from * dtps_provide() and/or dtps_provide_module(), it may be called from other * non-DTrace contexts. Neither cpu_lock nor mod_lock may be held. * * 2.8 dtrace_id_t dtrace_probe_lookup(dtrace_provider_id_t id, const char *mod, * const char *func, const char *name) * * 2.8.1 Overview * * Looks up a probe based on provider and one or more of module name, * function name and probe name. * * 2.8.2 Arguments and Notes * * The first argument is the provider identifier, as returned from a * successful call to dtrace_register(). The second, third, and fourth * arguments are the module name, function name, and probe name, * respectively. 
Any of these may be NULL; dtrace_probe_lookup() will return * the identifier of the first probe that is provided by the specified * provider and matches all of the non-NULL matching criteria. * dtrace_probe_lookup() is generally used by a provider to check the * existence of a probe before creating it with dtrace_probe_create(). * * 2.8.3 Return value * * If the probe exists, returns its identifier. If the probe does not exist, * returns DTRACE_IDNONE. * * 2.8.4 Caller's context * * While dtrace_probe_lookup() is generally expected to be called from * dtps_provide() and/or dtps_provide_module(), it may also be called from * other non-DTrace contexts. Neither cpu_lock nor mod_lock may be held. * * 2.9 void *dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t probe) * * 2.9.1 Overview * * Returns the probe argument associated with the specified probe. * * 2.9.2 Arguments and Notes * * The first argument is the provider identifier, as returned from a * successful call to dtrace_register(). The second argument is a probe * identifier, as returned from dtrace_probe_lookup() or * dtrace_probe_create(). This is useful if a probe has multiple * provider-specific components to it: the provider can create the probe * once with provider-specific state, and then add to the state by looking * up the probe based on probe identifier. * * 2.9.3 Return value * * Returns the argument associated with the specified probe. If the * specified probe does not exist, or if the specified probe is not provided * by the specified provider, NULL is returned. * * 2.9.4 Caller's context * * While dtrace_probe_arg() is generally expected to be called from * dtps_provide() and/or dtps_provide_module(), it may also be called from * other non-DTrace contexts. Neither cpu_lock nor mod_lock may be held. * * 2.10 void dtrace_probe(dtrace_id_t probe, uintptr_t arg0, uintptr_t arg1, * uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) * * 2.10.1 Overview * * The epicenter of DTrace: fires the specified probe with the specified * arguments. * * 2.10.2 Arguments and Notes * * The first argument is a probe identifier as returned by * dtrace_probe_create() or dtrace_probe_lookup(). The second through sixth * arguments are the values to which the D variables "arg0" through "arg4" * will be mapped. * * dtrace_probe() should be called whenever the specified probe has fired -- * however the provider defines it. * * 2.10.3 Return value * * None. * * 2.10.4 Caller's context * * dtrace_probe() may be called in virtually any context: kernel, user, * interrupt, high-level interrupt, with arbitrary adaptive locks held, with * dispatcher locks held, with interrupts disabled, etc. The only latitude * that must be afforded to DTrace is the ability to make calls within * itself (and to its in-kernel subroutines) and the ability to access * arbitrary (but mapped) memory. On some platforms, this constrains * context. For example, on UltraSPARC, dtrace_probe() cannot be called * from any context in which TL is greater than zero. dtrace_probe() may * also not be called from any routine which may be called by dtrace_probe() * -- which includes functions in the DTrace framework and some in-kernel * DTrace subroutines. All such functions begin with "dtrace_"; providers that * instrument the kernel arbitrarily should be sure to not instrument these * routines. 
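 *
 * Pulling the pieces above together, a hedged end-to-end sketch of a
 * provider's load path follows; all "hypo" names are hypothetical,
 * hypo_provide() and hypo_destroy() are assumed to exist, and error
 * handling is elided. dtps_usermode is left NULL on the assumption that
 * the probes are kernel-only (see 1.10), and dtps_getargval is left NULL
 * so that the dtrace_getarg() built-in is used (see 1.9):
 *
 *	static dtrace_pops_t hypo_pops = {
 *		hypo_provide,		NULL,
 *		hypo_enable,		hypo_disable,
 *		NULL,			NULL,
 *		hypo_getargdesc,	NULL,
 *		NULL,			hypo_destroy
 *	};
 *	static dtrace_provider_id_t hypo_id;
 *
 *	static void
 *	hypo_load(void)
 *	{
 *		if (dtrace_register("hypo", &hypo_attr, DTRACE_PRIV_KERNEL,
 *		    NULL, &hypo_pops, NULL, &hypo_id) != 0)
 *			return;
 *		if (dtrace_probe_lookup(hypo_id, NULL, NULL, "tick") ==
 *		    DTRACE_IDNONE)
 *			(void) dtrace_probe_create(hypo_id, NULL, NULL,
 *			    "tick", 0, NULL);
 *	}
 *
 * The created probe, once enabled, would then be fired from the
 * instrumented code via dtrace_probe(id, arg0, arg1, arg2, arg3, arg4).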
*/ typedef struct dtrace_pops { void (*dtps_provide)(void *arg, dtrace_probedesc_t *spec); void (*dtps_provide_module)(void *arg, modctl_t *mp); void (*dtps_enable)(void *arg, dtrace_id_t id, void *parg); void (*dtps_disable)(void *arg, dtrace_id_t id, void *parg); void (*dtps_suspend)(void *arg, dtrace_id_t id, void *parg); void (*dtps_resume)(void *arg, dtrace_id_t id, void *parg); void (*dtps_getargdesc)(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc); uint64_t (*dtps_getargval)(void *arg, dtrace_id_t id, void *parg, int argno, int aframes); int (*dtps_usermode)(void *arg, dtrace_id_t id, void *parg); void (*dtps_destroy)(void *arg, dtrace_id_t id, void *parg); } dtrace_pops_t; #define DTRACE_MODE_KERNEL 0x01 #define DTRACE_MODE_USER 0x02 #define DTRACE_MODE_NOPRIV_DROP 0x10 #define DTRACE_MODE_NOPRIV_RESTRICT 0x20 #define DTRACE_MODE_LIMITEDPRIV_RESTRICT 0x40 typedef uintptr_t dtrace_provider_id_t; extern int dtrace_register(const char *, const dtrace_pattr_t *, uint32_t, cred_t *, const dtrace_pops_t *, void *, dtrace_provider_id_t *); extern int dtrace_unregister(dtrace_provider_id_t); extern int dtrace_condense(dtrace_provider_id_t); extern void dtrace_invalidate(dtrace_provider_id_t); extern dtrace_id_t dtrace_probe_lookup(dtrace_provider_id_t, char *, char *, char *); extern dtrace_id_t dtrace_probe_create(dtrace_provider_id_t, const char *, const char *, const char *, int, void *); extern void *dtrace_probe_arg(dtrace_provider_id_t, dtrace_id_t); extern void dtrace_probe(dtrace_id_t, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4); /* * DTrace Meta Provider API * * The following functions are implemented by the DTrace framework and are * used to implement meta providers. Meta providers plug into the DTrace * framework and are used to instantiate new providers on the fly. At * present, there is only one type of meta provider and only one meta * provider may be registered with the DTrace framework at a time. The * sole meta provider type provides user-land static tracing facilities * by taking meta probe descriptions and adding a corresponding provider * into the DTrace framework. * * 1 Framework-to-Provider * * 1.1 Overview * * The Framework-to-Provider API is represented by the dtrace_mops structure * that the meta provider passes to the framework when registering itself as * a meta provider. This structure consists of the following members: * * dtms_create_probe() <-- Add a new probe to a created provider * dtms_provide_pid() <-- Create a new provider for a given process * dtms_remove_pid() <-- Remove a previously created provider * * 1.2 void dtms_create_probe(void *arg, void *parg, * dtrace_helper_probedesc_t *probedesc); * * 1.2.1 Overview * * Called by the DTrace framework to create a new probe in a provider * created by this meta provider. * * 1.2.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_meta_register(). * The second argument is the provider cookie for the associated provider; * this is obtained from the return value of dtms_provide_pid(). The third * argument is the helper probe description. * * 1.2.3 Return value * * None * * 1.2.4 Caller's context * * dtms_create_probe() is called from either ioctl() or module load context * in the context of a newly-created provider (that is, a provider that * is a result of a call to dtms_provide_pid()). 
The DTrace framework is * locked in such a way that meta providers may not register or unregister, * such that no other thread can call into a meta provider operation and that * atomicity is assured with respect to meta provider operations across * dtms_provide_pid() and subsequent calls to dtms_create_probe(). * The context is thus effectively single-threaded with respect to the meta * provider, and the meta provider cannot call dtrace_meta_register() * or dtrace_meta_unregister(). However, the context is such that the * provider may (and is expected to) call provider-related DTrace provider * APIs including dtrace_probe_create(). * * 1.3 void *dtms_provide_pid(void *arg, dtrace_helper_provdesc_t *mprov, * pid_t pid) * * 1.3.1 Overview * * Called by the DTrace framework to instantiate a new provider given the * description of the provider and probes in the mprov argument. The * meta provider should call dtrace_register() to insert the new provider * into the DTrace framework. * * 1.3.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_meta_register(). * The second argument is a pointer to a structure describing the new * helper provider. The third argument is the process identifier for the * process associated with this new provider. Note that the name of the * provider as passed to dtrace_register() should be the concatenation of * the dthpv_provname member of the mprov argument and the process * identifier as a string. * * 1.3.3 Return value * * The cookie for the provider that the meta provider creates. This is * the same value that it passed to dtrace_register(). * * 1.3.4 Caller's context * * dtms_provide_pid() is called from either ioctl() or module load context. * The DTrace framework is locked in such a way that meta providers may not * register or unregister. This means that the meta provider cannot call * dtrace_meta_register() or dtrace_meta_unregister(). However, the context * is such that the provider may -- and is expected to -- call * provider-related DTrace provider APIs including dtrace_register(). * * 1.4 void dtms_remove_pid(void *arg, dtrace_helper_provdesc_t *mprov, * pid_t pid) * * 1.4.1 Overview * * Called by the DTrace framework to remove a provider that had previously * been instantiated via the dtms_provide_pid() entry point. The meta * provider need not remove the provider immediately, but this entry * point indicates that the provider should be removed as soon as possible * using the dtrace_unregister() API. * * 1.4.2 Arguments and notes * * The first argument is the cookie as passed to dtrace_meta_register(). * The second argument is a pointer to a structure describing the helper * provider. The third argument is the process identifier for the process * associated with this new provider. * * 1.4.3 Return value * * None * * 1.4.4 Caller's context * * dtms_remove_pid() is called from either ioctl() or exit() context. * The DTrace framework is locked in such a way that meta providers may not * register or unregister. This means that the meta provider cannot call * dtrace_meta_register() or dtrace_meta_unregister(). However, the context * is such that the provider may -- and is expected to -- call * provider-related DTrace provider APIs including dtrace_unregister(). 
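 *
 * A hedged sketch of meta provider registration (all "hypo" names are
 * hypothetical; the three entry points would be implemented per sections
 * 1.2 through 1.4 above):
 *
 *	static dtrace_mops_t hypo_mops = {
 *		hypo_meta_create_probe,
 *		hypo_meta_provide_pid,
 *		hypo_meta_remove_pid
 *	};
 *	static dtrace_meta_provider_id_t hypo_meta_id;
 *
 *	error = dtrace_meta_register("hypo_meta", &hypo_mops, NULL,
 *	    &hypo_meta_id);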
*/ typedef struct dtrace_helper_probedesc { char *dthpb_mod; /* probe module */ char *dthpb_func; /* probe function */ char *dthpb_name; /* probe name */ uint64_t dthpb_base; /* base address */ uint32_t *dthpb_offs; /* offsets array */ uint32_t *dthpb_enoffs; /* is-enabled offsets array */ uint32_t dthpb_noffs; /* offsets count */ uint32_t dthpb_nenoffs; /* is-enabled offsets count */ uint8_t *dthpb_args; /* argument mapping array */ uint8_t dthpb_xargc; /* translated argument count */ uint8_t dthpb_nargc; /* native argument count */ char *dthpb_xtypes; /* translated types strings */ char *dthpb_ntypes; /* native types strings */ } dtrace_helper_probedesc_t; typedef struct dtrace_helper_provdesc { char *dthpv_provname; /* provider name */ dtrace_pattr_t dthpv_pattr; /* stability attributes */ } dtrace_helper_provdesc_t; typedef struct dtrace_mops { void (*dtms_create_probe)(void *, void *, dtrace_helper_probedesc_t *); void *(*dtms_provide_pid)(void *, dtrace_helper_provdesc_t *, pid_t); void (*dtms_remove_pid)(void *, dtrace_helper_provdesc_t *, pid_t); } dtrace_mops_t; typedef uintptr_t dtrace_meta_provider_id_t; extern int dtrace_meta_register(const char *, const dtrace_mops_t *, void *, dtrace_meta_provider_id_t *); extern int dtrace_meta_unregister(dtrace_meta_provider_id_t); /* * DTrace Kernel Hooks * * The following functions are implemented by the base kernel and form a set of * hooks used by the DTrace framework. DTrace hooks are implemented in either * uts/common/os/dtrace_subr.c, an ISA-specific assembly file, or in a * uts/<arch>/os/dtrace_subr.c corresponding to each hardware platform. */ typedef enum dtrace_vtime_state { DTRACE_VTIME_INACTIVE = 0, /* No DTrace, no TNF */ DTRACE_VTIME_ACTIVE, /* DTrace virtual time, no TNF */ DTRACE_VTIME_INACTIVE_TNF, /* No DTrace, TNF active */ DTRACE_VTIME_ACTIVE_TNF /* DTrace virtual time _and_ TNF */ } dtrace_vtime_state_t; #ifdef illumos extern dtrace_vtime_state_t dtrace_vtime_active; #endif extern void dtrace_vtime_switch(kthread_t *next); extern void dtrace_vtime_enable_tnf(void); extern void dtrace_vtime_disable_tnf(void); extern void dtrace_vtime_enable(void); extern void dtrace_vtime_disable(void); struct regs; struct reg; #ifdef illumos extern int (*dtrace_pid_probe_ptr)(struct reg *); extern int (*dtrace_return_probe_ptr)(struct reg *); extern void (*dtrace_fasttrap_fork_ptr)(proc_t *, proc_t *); extern void (*dtrace_fasttrap_exec_ptr)(proc_t *); extern void (*dtrace_fasttrap_exit_ptr)(proc_t *); extern void dtrace_fasttrap_fork(proc_t *, proc_t *); #endif typedef uintptr_t dtrace_icookie_t; typedef void (*dtrace_xcall_t)(void *); extern dtrace_icookie_t dtrace_interrupt_disable(void); extern void dtrace_interrupt_enable(dtrace_icookie_t); extern void dtrace_membar_producer(void); extern void dtrace_membar_consumer(void); extern void (*dtrace_cpu_init)(processorid_t); #ifdef illumos extern void (*dtrace_modload)(modctl_t *); extern void (*dtrace_modunload)(modctl_t *); #endif extern void (*dtrace_helpers_cleanup)(void); extern void (*dtrace_helpers_fork)(proc_t *parent, proc_t *child); extern void (*dtrace_cpustart_init)(void); extern void (*dtrace_cpustart_fini)(void); extern void (*dtrace_closef)(void); extern void (*dtrace_debugger_init)(void); extern void (*dtrace_debugger_fini)(void); extern dtrace_cacheid_t dtrace_predcache_id; #ifdef illumos extern hrtime_t dtrace_gethrtime(void); #else void dtrace_debug_printf(const char *, ...) 
__printflike(1, 2); #endif extern void dtrace_sync(void); extern void dtrace_toxic_ranges(void (*)(uintptr_t, uintptr_t)); extern void dtrace_xcall(processorid_t, dtrace_xcall_t, void *); extern void dtrace_vpanic(const char *, __va_list); extern void dtrace_panic(const char *, ...); extern int dtrace_safe_defer_signal(void); extern void dtrace_safe_synchronous_signal(void); extern int dtrace_mach_aframes(void); #if defined(__i386) || defined(__amd64) extern int dtrace_instr_size(uchar_t *instr); extern int dtrace_instr_size_isa(uchar_t *, model_t, int *); extern void dtrace_invop_callsite(void); #endif extern void dtrace_invop_add(int (*)(uintptr_t, struct trapframe *, uintptr_t)); extern void dtrace_invop_remove(int (*)(uintptr_t, struct trapframe *, uintptr_t)); #ifdef __sparc extern int dtrace_blksuword32(uintptr_t, uint32_t *, int); extern void dtrace_getfsr(uint64_t *); #endif #ifndef illumos extern void dtrace_helpers_duplicate(proc_t *, proc_t *); extern void dtrace_helpers_destroy(proc_t *); #endif #define DTRACE_CPUFLAG_ISSET(flag) \ (cpu_core[curcpu].cpuc_dtrace_flags & (flag)) #define DTRACE_CPUFLAG_SET(flag) \ (cpu_core[curcpu].cpuc_dtrace_flags |= (flag)) #define DTRACE_CPUFLAG_CLEAR(flag) \ (cpu_core[curcpu].cpuc_dtrace_flags &= ~(flag)) #endif /* _KERNEL */ #endif /* _ASM */ #if defined(__i386) || defined(__amd64) #define DTRACE_INVOP_PUSHL_EBP 1 #define DTRACE_INVOP_PUSHQ_RBP DTRACE_INVOP_PUSHL_EBP #define DTRACE_INVOP_POPL_EBP 2 #define DTRACE_INVOP_POPQ_RBP DTRACE_INVOP_POPL_EBP #define DTRACE_INVOP_LEAVE 3 #define DTRACE_INVOP_NOP 4 #define DTRACE_INVOP_RET 5 #elif defined(__powerpc__) #define DTRACE_INVOP_RET 1 #define DTRACE_INVOP_BCTR 2 #define DTRACE_INVOP_BLR 3 #define DTRACE_INVOP_JUMP 4 #define DTRACE_INVOP_MFLR_R0 5 #define DTRACE_INVOP_NOP 6 #elif defined(__arm__) #define DTRACE_INVOP_SHIFT 4 #define DTRACE_INVOP_MASK ((1 << DTRACE_INVOP_SHIFT) - 1) #define DTRACE_INVOP_DATA(x) ((x) >> DTRACE_INVOP_SHIFT) #define DTRACE_INVOP_PUSHM 1 #define DTRACE_INVOP_POPM 2 #define DTRACE_INVOP_B 3 #elif defined(__aarch64__) #define INSN_SIZE 4 #define B_MASK 0xff000000 #define B_DATA_MASK 0x00ffffff #define B_INSTR 0x14000000 #define RET_INSTR 0xd65f03c0 #define LDP_STP_MASK 0xffc00000 #define STP_32 0x29800000 #define STP_64 0xa9800000 #define LDP_32 0x28c00000 #define LDP_64 0xa8c00000 #define LDP_STP_PREIND (1 << 24) #define LDP_STP_DIR (1 << 22) /* Load instruction */ #define ARG1_SHIFT 0 #define ARG1_MASK 0x1f #define ARG2_SHIFT 10 #define ARG2_MASK 0x1f #define OFFSET_SHIFT 15 #define OFFSET_SIZE 7 #define OFFSET_MASK ((1 << OFFSET_SIZE) - 1) #define DTRACE_INVOP_PUSHM 1 #define DTRACE_INVOP_RET 2 #define DTRACE_INVOP_B 3 #elif defined(__mips__) #define INSN_SIZE 4 /* Load/Store double RA to/from SP */ #define LDSD_RA_SP_MASK 0xffff0000 #define LDSD_DATA_MASK 0x0000ffff #define SD_RA_SP 0xffbf0000 #define LD_RA_SP 0xdfbf0000 #define DTRACE_INVOP_SD 1 #define DTRACE_INVOP_LD 2 #elif defined(__riscv__) #define SD_RA_SP_MASK 0x01fff07f #define SD_RA_SP 0x00113023 #define DTRACE_INVOP_SD 1 #define DTRACE_INVOP_RET 2 #define DTRACE_INVOP_NOP 3 #endif #ifdef __cplusplus } #endif #endif /* _SYS_DTRACE_H */ Index: projects/clang400-import/sys/cddl/contrib/opensolaris =================================================================== --- projects/clang400-import/sys/cddl/contrib/opensolaris (revision 312719) +++ projects/clang400-import/sys/cddl/contrib/opensolaris (revision 312720) Property changes on: projects/clang400-import/sys/cddl/contrib/opensolaris 
___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/cddl/contrib/opensolaris:r312624-312719 Index: projects/clang400-import/sys/cddl/dev/dtrace/dtrace_ioctl.c =================================================================== --- projects/clang400-import/sys/cddl/dev/dtrace/dtrace_ioctl.c (revision 312719) +++ projects/clang400-import/sys/cddl/dev/dtrace/dtrace_ioctl.c (revision 312720) @@ -1,858 +1,853 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END * * $FreeBSD$ * */ static int dtrace_verbose_ioctl; SYSCTL_INT(_debug_dtrace, OID_AUTO, verbose_ioctl, CTLFLAG_RW, &dtrace_verbose_ioctl, 0, "log DTrace ioctls"); #define DTRACE_IOCTL_PRINTF(fmt, ...) if (dtrace_verbose_ioctl) printf(fmt, ## __VA_ARGS__ ) static int dtrace_ioctl_helper(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { struct proc *p; dof_helper_t *dhp; dof_hdr_t *dof; int rval; dhp = NULL; dof = NULL; rval = 0; switch (cmd) { case DTRACEHIOC_ADDDOF: dhp = (dof_helper_t *)addr; addr = (caddr_t)(uintptr_t)dhp->dofhp_dof; - /* FALLTHROUGH */ - case DTRACEHIOC_ADD: - p = curproc; - if (dhp == NULL || p->p_pid == dhp->dofhp_pid) { + p = curproc; + if (p->p_pid == dhp->dofhp_pid) { dof = dtrace_dof_copyin((uintptr_t)addr, &rval); } else { p = pfind(dhp->dofhp_pid); if (p == NULL) return (EINVAL); if (!P_SHOULDSTOP(p) || (p->p_flag & (P_TRACED | P_WEXIT)) != P_TRACED || p->p_pptr != curproc) { PROC_UNLOCK(p); return (EINVAL); } _PHOLD(p); PROC_UNLOCK(p); dof = dtrace_dof_copyin_proc(p, (uintptr_t)addr, &rval); } if (dof == NULL) { if (p != curproc) PRELE(p); break; } mutex_enter(&dtrace_lock); if ((rval = dtrace_helper_slurp(dof, dhp, p)) != -1) { - if (dhp != NULL) { - dhp->dofhp_gen = rval; - copyout(dhp, addr, sizeof(*dhp)); - } + dhp->dofhp_gen = rval; rval = 0; } else { rval = EINVAL; } mutex_exit(&dtrace_lock); if (p != curproc) PRELE(p); break; case DTRACEHIOC_REMOVE: mutex_enter(&dtrace_lock); rval = dtrace_helper_destroygen(NULL, *(int *)(uintptr_t)addr); mutex_exit(&dtrace_lock); break; default: rval = ENOTTY; break; } return (rval); } /* ARGSUSED */ static int dtrace_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags __unused, struct thread *td) { dtrace_state_t *state; devfs_get_cdevpriv((void **) &state); int error = 0; if (state == NULL) return (EINVAL); if (state->dts_anon) { ASSERT(dtrace_anon.dta_state == NULL); state = state->dts_anon; } switch (cmd) { case DTRACEIOC_AGGDESC: { dtrace_aggdesc_t **paggdesc = (dtrace_aggdesc_t **) addr; dtrace_aggdesc_t aggdesc; dtrace_action_t *act; dtrace_aggregation_t *agg; int nrecs; uint32_t offs; dtrace_recdesc_t *lrec; void *buf; size_t size; uintptr_t dest; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_AGGDESC\n",__func__,__LINE__); if 
(copyin((void *) *paggdesc, &aggdesc, sizeof (aggdesc)) != 0) return (EFAULT); mutex_enter(&dtrace_lock); if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { mutex_exit(&dtrace_lock); return (EINVAL); } aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; nrecs = aggdesc.dtagd_nrecs; aggdesc.dtagd_nrecs = 0; offs = agg->dtag_base; lrec = &agg->dtag_action.dta_rec; aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; for (act = agg->dtag_first; ; act = act->dta_next) { ASSERT(act->dta_intuple || DTRACEACT_ISAGG(act->dta_kind)); /* * If this action has a record size of zero, it * denotes an argument to the aggregating action. * Because the presence of this record doesn't (or * shouldn't) affect the way the data is interpreted, * we don't copy it out to save user-level the * confusion of dealing with a zero-length record. */ if (act->dta_rec.dtrd_size == 0) { ASSERT(agg->dtag_hasarg); continue; } aggdesc.dtagd_nrecs++; if (act == &agg->dtag_action) break; } /* * Now that we have the size, we need to allocate a temporary * buffer in which to store the complete description. We need * the temporary buffer to be able to drop dtrace_lock() * across the copyout(), below. */ size = sizeof (dtrace_aggdesc_t) + (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); buf = kmem_alloc(size, KM_SLEEP); dest = (uintptr_t)buf; bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); for (act = agg->dtag_first; ; act = act->dta_next) { dtrace_recdesc_t rec = act->dta_rec; /* * See the comment in the above loop for why we pass * over zero-length records. */ if (rec.dtrd_size == 0) { ASSERT(agg->dtag_hasarg); continue; } if (nrecs-- == 0) break; rec.dtrd_offset -= offs; bcopy(&rec, (void *)dest, sizeof (rec)); dest += sizeof (dtrace_recdesc_t); if (act == &agg->dtag_action) break; } mutex_exit(&dtrace_lock); if (copyout(buf, (void *) *paggdesc, dest - (uintptr_t)buf) != 0) { kmem_free(buf, size); return (EFAULT); } kmem_free(buf, size); return (0); } case DTRACEIOC_AGGSNAP: case DTRACEIOC_BUFSNAP: { dtrace_bufdesc_t **pdesc = (dtrace_bufdesc_t **) addr; dtrace_bufdesc_t desc; caddr_t cached; dtrace_buffer_t *buf; dtrace_debug_output(); if (copyin((void *) *pdesc, &desc, sizeof (desc)) != 0) return (EFAULT); DTRACE_IOCTL_PRINTF("%s(%d): %s curcpu %d cpu %d\n", __func__,__LINE__, cmd == DTRACEIOC_AGGSNAP ? "DTRACEIOC_AGGSNAP":"DTRACEIOC_BUFSNAP", curcpu, desc.dtbd_cpu); if (desc.dtbd_cpu >= NCPU) return (ENOENT); if (pcpu_find(desc.dtbd_cpu) == NULL) return (ENOENT); mutex_enter(&dtrace_lock); if (cmd == DTRACEIOC_BUFSNAP) { buf = &state->dts_buffer[desc.dtbd_cpu]; } else { buf = &state->dts_aggbuffer[desc.dtbd_cpu]; } if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { size_t sz = buf->dtb_offset; if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { mutex_exit(&dtrace_lock); return (EBUSY); } /* * If this buffer has already been consumed, we're * going to indicate that there's nothing left here * to consume. */ if (buf->dtb_flags & DTRACEBUF_CONSUMED) { mutex_exit(&dtrace_lock); desc.dtbd_size = 0; desc.dtbd_drops = 0; desc.dtbd_errors = 0; desc.dtbd_oldest = 0; sz = sizeof (desc); if (copyout(&desc, (void *) *pdesc, sz) != 0) return (EFAULT); return (0); } /* * If this is a ring buffer that has wrapped, we want * to copy the whole thing out. 
*/ if (buf->dtb_flags & DTRACEBUF_WRAPPED) { dtrace_buffer_polish(buf); sz = buf->dtb_size; } if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) { mutex_exit(&dtrace_lock); return (EFAULT); } desc.dtbd_size = sz; desc.dtbd_drops = buf->dtb_drops; desc.dtbd_errors = buf->dtb_errors; desc.dtbd_oldest = buf->dtb_xamot_offset; desc.dtbd_timestamp = dtrace_gethrtime(); mutex_exit(&dtrace_lock); if (copyout(&desc, (void *) *pdesc, sizeof (desc)) != 0) return (EFAULT); buf->dtb_flags |= DTRACEBUF_CONSUMED; return (0); } if (buf->dtb_tomax == NULL) { ASSERT(buf->dtb_xamot == NULL); mutex_exit(&dtrace_lock); return (ENOENT); } cached = buf->dtb_tomax; ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); dtrace_xcall(desc.dtbd_cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf); state->dts_errors += buf->dtb_xamot_errors; /* * If the buffers did not actually switch, then the cross call * did not take place -- presumably because the given CPU is * not in the ready set. If this is the case, we'll return * ENOENT. */ if (buf->dtb_tomax == cached) { ASSERT(buf->dtb_xamot != cached); mutex_exit(&dtrace_lock); return (ENOENT); } ASSERT(cached == buf->dtb_xamot); DTRACE_IOCTL_PRINTF("%s(%d): copyout the buffer snapshot\n",__func__,__LINE__); /* * We have our snapshot; now copy it out. */ if (copyout(buf->dtb_xamot, desc.dtbd_data, buf->dtb_xamot_offset) != 0) { mutex_exit(&dtrace_lock); return (EFAULT); } desc.dtbd_size = buf->dtb_xamot_offset; desc.dtbd_drops = buf->dtb_xamot_drops; desc.dtbd_errors = buf->dtb_xamot_errors; desc.dtbd_oldest = 0; desc.dtbd_timestamp = buf->dtb_switched; mutex_exit(&dtrace_lock); DTRACE_IOCTL_PRINTF("%s(%d): copyout buffer desc: size %zd drops %lu errors %lu\n",__func__,__LINE__,(size_t) desc.dtbd_size,(u_long) desc.dtbd_drops,(u_long) desc.dtbd_errors); /* * Finally, copy out the buffer description. */ if (copyout(&desc, (void *) *pdesc, sizeof (desc)) != 0) return (EFAULT); return (0); } case DTRACEIOC_CONF: { dtrace_conf_t conf; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_CONF\n",__func__,__LINE__); bzero(&conf, sizeof (conf)); conf.dtc_difversion = DIF_VERSION; conf.dtc_difintregs = DIF_DIR_NREGS; conf.dtc_diftupregs = DIF_DTR_NREGS; conf.dtc_ctfmodel = CTF_MODEL_NATIVE; *((dtrace_conf_t *) addr) = conf; return (0); } case DTRACEIOC_DOFGET: { dof_hdr_t **pdof = (dof_hdr_t **) addr; dof_hdr_t hdr, *dof = *pdof; int rval; uint64_t len; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_DOFGET\n",__func__,__LINE__); if (copyin((void *)dof, &hdr, sizeof (hdr)) != 0) return (EFAULT); mutex_enter(&dtrace_lock); dof = dtrace_dof_create(state); mutex_exit(&dtrace_lock); len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); rval = copyout(dof, (void *) *pdof, len); dtrace_dof_destroy(dof); return (rval == 0 ? 0 : EFAULT); } case DTRACEIOC_ENABLE: { dof_hdr_t *dof = NULL; dtrace_enabling_t *enab = NULL; dtrace_vstate_t *vstate; int err = 0; int rval; dtrace_enable_io_t *p = (dtrace_enable_io_t *) addr; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_ENABLE\n",__func__,__LINE__); /* * If a NULL argument has been passed, we take this as our * cue to reevaluate our enablings. 
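* (Concretely: a consumer passes a NULL DOF pointer to trigger
* dtrace_enabling_matchall() below, which re-matches every retained
* enabling against the probe table, e.g. after a new provider has
* registered additional probes.)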
*/ if (p->dof == NULL) { dtrace_enabling_matchall(); return (0); } if ((dof = dtrace_dof_copyin((uintptr_t) p->dof, &rval)) == NULL) return (EINVAL); mutex_enter(&cpu_lock); mutex_enter(&dtrace_lock); vstate = &state->dts_vstate; if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); dtrace_dof_destroy(dof); return (EBUSY); } if (dtrace_dof_slurp(dof, vstate, td->td_ucred, &enab, 0, B_TRUE) != 0) { mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); dtrace_dof_destroy(dof); return (EINVAL); } if ((rval = dtrace_dof_options(dof, state)) != 0) { dtrace_enabling_destroy(enab); mutex_exit(&dtrace_lock); mutex_exit(&cpu_lock); dtrace_dof_destroy(dof); return (rval); } if ((err = dtrace_enabling_match(enab, &p->n_matched)) == 0) { err = dtrace_enabling_retain(enab); } else { dtrace_enabling_destroy(enab); } mutex_exit(&cpu_lock); mutex_exit(&dtrace_lock); dtrace_dof_destroy(dof); return (err); } case DTRACEIOC_EPROBE: { dtrace_eprobedesc_t **pepdesc = (dtrace_eprobedesc_t **) addr; dtrace_eprobedesc_t epdesc; dtrace_ecb_t *ecb; dtrace_action_t *act; void *buf; size_t size; uintptr_t dest; int nrecs; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_EPROBE\n",__func__,__LINE__); if (copyin((void *)*pepdesc, &epdesc, sizeof (epdesc)) != 0) return (EFAULT); mutex_enter(&dtrace_lock); if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { mutex_exit(&dtrace_lock); return (EINVAL); } if (ecb->dte_probe == NULL) { mutex_exit(&dtrace_lock); return (EINVAL); } epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; epdesc.dtepd_uarg = ecb->dte_uarg; epdesc.dtepd_size = ecb->dte_size; nrecs = epdesc.dtepd_nrecs; epdesc.dtepd_nrecs = 0; for (act = ecb->dte_action; act != NULL; act = act->dta_next) { if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) continue; epdesc.dtepd_nrecs++; } /* * Now that we have the size, we need to allocate a temporary * buffer in which to store the complete description. We need * the temporary buffer to be able to drop dtrace_lock() * across the copyout(), below. */ size = sizeof (dtrace_eprobedesc_t) + (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); buf = kmem_alloc(size, KM_SLEEP); dest = (uintptr_t)buf; bcopy(&epdesc, (void *)dest, sizeof (epdesc)); dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); for (act = ecb->dte_action; act != NULL; act = act->dta_next) { if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) continue; if (nrecs-- == 0) break; bcopy(&act->dta_rec, (void *)dest, sizeof (dtrace_recdesc_t)); dest += sizeof (dtrace_recdesc_t); } mutex_exit(&dtrace_lock); if (copyout(buf, (void *) *pepdesc, dest - (uintptr_t)buf) != 0) { kmem_free(buf, size); return (EFAULT); } kmem_free(buf, size); return (0); } case DTRACEIOC_FORMAT: { dtrace_fmtdesc_t *fmt = (dtrace_fmtdesc_t *) addr; char *str; int len; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_FORMAT\n",__func__,__LINE__); mutex_enter(&dtrace_lock); if (fmt->dtfd_format == 0 || fmt->dtfd_format > state->dts_nformats) { mutex_exit(&dtrace_lock); return (EINVAL); } /* * Format strings are allocated contiguously and they are * never freed; if a format index is less than the number * of formats, we can assert that the format map is non-NULL * and that the format for the specified index is non-NULL. 
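* Note that the format indices exposed to userland are 1-based: an
* index of 0 is reserved to mean "no format", which is why the range
* check above rejects it and why dtfd_format - 1 is used to index
* dts_formats below.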
*/ ASSERT(state->dts_formats != NULL); str = state->dts_formats[fmt->dtfd_format - 1]; ASSERT(str != NULL); len = strlen(str) + 1; if (len > fmt->dtfd_length) { fmt->dtfd_length = len; } else { if (copyout(str, fmt->dtfd_string, len) != 0) { mutex_exit(&dtrace_lock); return (EINVAL); } } mutex_exit(&dtrace_lock); return (0); } case DTRACEIOC_GO: { int rval; processorid_t *cpuid = (processorid_t *) addr; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_GO\n",__func__,__LINE__); rval = dtrace_state_go(state, cpuid); return (rval); } case DTRACEIOC_PROBEARG: { dtrace_argdesc_t *desc = (dtrace_argdesc_t *) addr; dtrace_probe_t *probe; dtrace_provider_t *prov; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_PROBEARG\n",__func__,__LINE__); if (desc->dtargd_id == DTRACE_IDNONE) return (EINVAL); if (desc->dtargd_ndx == DTRACE_ARGNONE) return (EINVAL); mutex_enter(&dtrace_provider_lock); #ifdef illumos mutex_enter(&mod_lock); #endif mutex_enter(&dtrace_lock); if (desc->dtargd_id > dtrace_nprobes) { mutex_exit(&dtrace_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); return (EINVAL); } if ((probe = dtrace_probes[desc->dtargd_id - 1]) == NULL) { mutex_exit(&dtrace_lock); #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); return (EINVAL); } mutex_exit(&dtrace_lock); prov = probe->dtpr_provider; if (prov->dtpv_pops.dtps_getargdesc == NULL) { /* * There isn't any typed information for this probe. * Set the argument number to DTRACE_ARGNONE. */ desc->dtargd_ndx = DTRACE_ARGNONE; } else { desc->dtargd_native[0] = '\0'; desc->dtargd_xlate[0] = '\0'; desc->dtargd_mapping = desc->dtargd_ndx; prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, probe->dtpr_id, probe->dtpr_arg, desc); } #ifdef illumos mutex_exit(&mod_lock); #endif mutex_exit(&dtrace_provider_lock); return (0); } case DTRACEIOC_PROBEMATCH: case DTRACEIOC_PROBES: { dtrace_probedesc_t *p_desc = (dtrace_probedesc_t *) addr; dtrace_probe_t *probe = NULL; dtrace_probekey_t pkey; dtrace_id_t i; int m = 0; uint32_t priv = 0; uid_t uid = 0; zoneid_t zoneid = 0; DTRACE_IOCTL_PRINTF("%s(%d): %s\n",__func__,__LINE__, cmd == DTRACEIOC_PROBEMATCH ? "DTRACEIOC_PROBEMATCH":"DTRACEIOC_PROBES"); p_desc->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; p_desc->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; p_desc->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; p_desc->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; /* * Before we attempt to match this probe, we want to give * all providers the opportunity to provide it. 
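* (Passing a NULL provider argument to dtrace_probe_provide() below
* asks every registered provider, rather than a single one, to create
* any probes matching this description before the table walk runs.)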
*/ if (p_desc->dtpd_id == DTRACE_IDNONE) { mutex_enter(&dtrace_provider_lock); dtrace_probe_provide(p_desc, NULL); mutex_exit(&dtrace_provider_lock); p_desc->dtpd_id++; } if (cmd == DTRACEIOC_PROBEMATCH) { dtrace_probekey(p_desc, &pkey); pkey.dtpk_id = DTRACE_IDNONE; } dtrace_cred2priv(td->td_ucred, &priv, &uid, &zoneid); mutex_enter(&dtrace_lock); if (cmd == DTRACEIOC_PROBEMATCH) { for (i = p_desc->dtpd_id; i <= dtrace_nprobes; i++) { if ((probe = dtrace_probes[i - 1]) != NULL && (m = dtrace_match_probe(probe, &pkey, priv, uid, zoneid)) != 0) break; } if (m < 0) { mutex_exit(&dtrace_lock); return (EINVAL); } } else { for (i = p_desc->dtpd_id; i <= dtrace_nprobes; i++) { if ((probe = dtrace_probes[i - 1]) != NULL && dtrace_match_priv(probe, priv, uid, zoneid)) break; } } if (probe == NULL) { mutex_exit(&dtrace_lock); return (ESRCH); } dtrace_probe_description(probe, p_desc); mutex_exit(&dtrace_lock); return (0); } case DTRACEIOC_PROVIDER: { dtrace_providerdesc_t *pvd = (dtrace_providerdesc_t *) addr; dtrace_provider_t *pvp; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_PROVIDER\n",__func__,__LINE__); pvd->dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; mutex_enter(&dtrace_provider_lock); for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { if (strcmp(pvp->dtpv_name, pvd->dtvd_name) == 0) break; } mutex_exit(&dtrace_provider_lock); if (pvp == NULL) return (ESRCH); bcopy(&pvp->dtpv_priv, &pvd->dtvd_priv, sizeof (dtrace_ppriv_t)); bcopy(&pvp->dtpv_attr, &pvd->dtvd_attr, sizeof (dtrace_pattr_t)); return (0); } case DTRACEIOC_REPLICATE: { dtrace_repldesc_t *desc = (dtrace_repldesc_t *) addr; dtrace_probedesc_t *match = &desc->dtrpd_match; dtrace_probedesc_t *create = &desc->dtrpd_create; int err; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_REPLICATE\n",__func__,__LINE__); match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; mutex_enter(&dtrace_lock); err = dtrace_enabling_replicate(state, match, create); mutex_exit(&dtrace_lock); return (err); } case DTRACEIOC_STATUS: { dtrace_status_t *stat = (dtrace_status_t *) addr; dtrace_dstate_t *dstate; int i, j; uint64_t nerrs; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_STATUS\n",__func__,__LINE__); /* * See the comment in dtrace_state_deadman() for the reason * for setting dts_laststatus to INT64_MAX before setting * it to the correct value. 
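* In short: the deadman compares dts_laststatus against the current
* time, so publishing INT64_MAX first (ordered by the producer barrier
* below) ensures that it never observes a stale timestamp mid-update
* and wrongly concludes that the consumer has died.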
*/ state->dts_laststatus = INT64_MAX; dtrace_membar_producer(); state->dts_laststatus = dtrace_gethrtime(); bzero(stat, sizeof (*stat)); mutex_enter(&dtrace_lock); if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) { mutex_exit(&dtrace_lock); return (ENOENT); } if (state->dts_activity == DTRACE_ACTIVITY_DRAINING) stat->dtst_exiting = 1; nerrs = state->dts_errors; dstate = &state->dts_vstate.dtvs_dynvars; for (i = 0; i < NCPU; i++) { #ifndef illumos if (pcpu_find(i) == NULL) continue; #endif dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i]; stat->dtst_dyndrops += dcpu->dtdsc_drops; stat->dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops; stat->dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops; if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL) stat->dtst_filled++; nerrs += state->dts_buffer[i].dtb_errors; for (j = 0; j < state->dts_nspeculations; j++) { dtrace_speculation_t *spec; dtrace_buffer_t *buf; spec = &state->dts_speculations[j]; buf = &spec->dtsp_buffer[i]; stat->dtst_specdrops += buf->dtb_xamot_drops; } } stat->dtst_specdrops_busy = state->dts_speculations_busy; stat->dtst_specdrops_unavail = state->dts_speculations_unavail; stat->dtst_stkstroverflows = state->dts_stkstroverflows; stat->dtst_dblerrors = state->dts_dblerrors; stat->dtst_killed = (state->dts_activity == DTRACE_ACTIVITY_KILLED); stat->dtst_errors = nerrs; mutex_exit(&dtrace_lock); return (0); } case DTRACEIOC_STOP: { int rval; processorid_t *cpuid = (processorid_t *) addr; DTRACE_IOCTL_PRINTF("%s(%d): DTRACEIOC_STOP\n",__func__,__LINE__); mutex_enter(&dtrace_lock); rval = dtrace_state_stop(state, cpuid); mutex_exit(&dtrace_lock); return (rval); } default: error = ENOTTY; } return (error); } Index: projects/clang400-import/sys/conf/files =================================================================== --- projects/clang400-import/sys/conf/files (revision 312719) +++ projects/clang400-import/sys/conf/files (revision 312720) @@ -1,4669 +1,4694 @@ # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
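#
# As an illustration of these conventions (a hypothetical entry, not part
# of this commit): a generated-header rule takes the following shape, with
# the whole compile-with command on one logical line continued by
# backslash-newlines outside the quoted string, and with every dependency
# named on the single "dependency" line:
#
# foo_gen.h optional foo \
#	dependency "$S/tools/foo2h.awk $S/dev/foo/foodevs" \
#	compile-with "${AWK} -f $S/tools/foo2h.awk $S/dev/foo/foodevs" \
#	no-obj no-implicit-rule before-depend \
#	clean "foo_gen.h"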
# acpi_quirks.h optional acpi \ dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \ no-obj no-implicit-rule before-depend \ clean "acpi_quirks.h" bhnd_nvram_map.h optional bhnd \ dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \ no-obj no-implicit-rule before-depend \ clean "bhnd_nvram_map.h" bhnd_nvram_map_data.h optional bhnd \ dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \ no-obj no-implicit-rule before-depend \ clean "bhnd_nvram_map_data.h" # # The 'fdt_dtb_file' target covers an actual DTB file name, which is derived # from the specified source (DTS) file: .dts -> .dtb # fdt_dtb_file optional fdt fdt_dtb_static \ compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtb.sh $S ${FDT_DTS_FILE} ${.CURDIR}'" \ no-obj no-implicit-rule before-depend \ clean "${FDT_DTS_FILE:R}.dtb" fdt_static_dtb.h optional fdt fdt_dtb_static \ compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ${.CURDIR}'" \ dependency "fdt_dtb_file" \ no-obj no-implicit-rule before-depend \ clean "fdt_static_dtb.h" feeder_eq_gen.h optional sound \ dependency "$S/tools/sound/feeder_eq_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_eq_mkfilter.awk -- ${FEEDER_EQ_PRESETS} > feeder_eq_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_eq_gen.h" feeder_rate_gen.h optional sound \ dependency "$S/tools/sound/feeder_rate_mkfilter.awk" \ compile-with "${AWK} -f $S/tools/sound/feeder_rate_mkfilter.awk -- ${FEEDER_RATE_PRESETS} > feeder_rate_gen.h" \ no-obj no-implicit-rule before-depend \ clean "feeder_rate_gen.h" snd_fxdiv_gen.h optional sound \ dependency "$S/tools/sound/snd_fxdiv_gen.awk" \ compile-with "${AWK} -f $S/tools/sound/snd_fxdiv_gen.awk -- > snd_fxdiv_gen.h" \ no-obj no-implicit-rule before-depend \ clean "snd_fxdiv_gen.h" miidevs.h optional miibus | mii \ dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \ no-obj no-implicit-rule before-depend \ clean "miidevs.h" pccarddevs.h standard \ dependency "$S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ compile-with "${AWK} -f $S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \ no-obj no-implicit-rule before-depend \ clean "pccarddevs.h" kbdmuxmap.h optional kbdmux_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${KBDMUX_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > kbdmuxmap.h" \ no-obj no-implicit-rule before-depend \ clean "kbdmuxmap.h" teken_state.h optional sc | vt \ dependency "$S/teken/gensequences $S/teken/sequences" \ compile-with "${AWK} -f $S/teken/gensequences $S/teken/sequences > teken_state.h" \ no-obj no-implicit-rule before-depend \ clean "teken_state.h" usbdevs.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \ no-obj no-implicit-rule before-depend \ clean "usbdevs.h" usbdevs_data.h optional usb \ dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \ compile-with "${AWK} 
-f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \ no-obj no-implicit-rule before-depend \ clean "usbdevs_data.h" cam/cam.c optional scbus cam/cam_compat.c optional scbus cam/cam_iosched.c optional scbus cam/cam_periph.c optional scbus cam/cam_queue.c optional scbus cam/cam_sim.c optional scbus cam/cam_xpt.c optional scbus cam/ata/ata_all.c optional scbus cam/ata/ata_xpt.c optional scbus cam/ata/ata_pmp.c optional scbus cam/nvme/nvme_all.c optional scbus nvme !nvd cam/nvme/nvme_da.c optional scbus nvme da !nvd cam/nvme/nvme_xpt.c optional scbus nvme !nvd cam/scsi/scsi_xpt.c optional scbus cam/scsi/scsi_all.c optional scbus cam/scsi/scsi_cd.c optional cd cam/scsi/scsi_ch.c optional ch cam/ata/ata_da.c optional ada | da cam/ctl/ctl.c optional ctl cam/ctl/ctl_backend.c optional ctl cam/ctl/ctl_backend_block.c optional ctl cam/ctl/ctl_backend_ramdisk.c optional ctl cam/ctl/ctl_cmd_table.c optional ctl cam/ctl/ctl_frontend.c optional ctl cam/ctl/ctl_frontend_cam_sim.c optional ctl cam/ctl/ctl_frontend_ioctl.c optional ctl cam/ctl/ctl_frontend_iscsi.c optional ctl cam/ctl/ctl_ha.c optional ctl cam/ctl/ctl_scsi_all.c optional ctl cam/ctl/ctl_tpc.c optional ctl cam/ctl/ctl_tpc_local.c optional ctl cam/ctl/ctl_error.c optional ctl cam/ctl/ctl_util.c optional ctl cam/ctl/scsi_ctl.c optional ctl cam/scsi/scsi_da.c optional da cam/scsi/scsi_low.c optional ct | ncv | nsp | stg cam/scsi/scsi_pass.c optional pass cam/scsi/scsi_pt.c optional pt cam/scsi/scsi_sa.c optional sa cam/scsi/scsi_enc.c optional ses cam/scsi/scsi_enc_ses.c optional ses cam/scsi/scsi_enc_safte.c optional ses cam/scsi/scsi_sg.c optional sg cam/scsi/scsi_targ_bh.c optional targbh cam/scsi/scsi_target.c optional targ cam/scsi/smp_all.c optional scbus # shared between zfs and dtrace cddl/compat/opensolaris/kern/opensolaris.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_cmn_err.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_kmem.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_misc.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_proc.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_sunddi.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/compat/opensolaris/kern/opensolaris_taskq.c optional zfs | dtrace compile-with "${CDDL_C}" # zfs specific cddl/compat/opensolaris/kern/opensolaris_acl.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_dtrace.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_kobj.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_kstat.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_lookup.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_policy.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_string.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_sysevent.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_uio.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_vfs.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_vm.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_zone.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/acl/acl_common.c optional zfs compile-with 
"${ZFS_C}" cddl/contrib/opensolaris/common/avl/avl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_fnvpair.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_nvpair.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_nvpair_alloc_fixed.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/unicode/u8_textprep.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfeature_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_comutil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_deleg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_fletcher.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_ioctl_compat.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_namecheck.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zpool_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zprop_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/gfs.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/vnode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/blkptr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bplist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bptree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bqueue.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt_zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c optional zfs compile-with "${ZFS_C}" \ warning "kernel contains CDDL licensed ZFS filesystem" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_bookmark.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c optional zfs 
compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_userhold.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_synctask.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/gzip.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lz4.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/multilist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/range_tree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sha256.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/skein_zfs.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/space_reftree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/trim_map.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/uberblock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/unique.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c optional 
zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfeature.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_checksum.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_compress.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_inject.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zle.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zrlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/callb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/list.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/nvpair_alloc_system.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/adler32.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/deflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inffast.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inftrees.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/opensolaris_crc32.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/trees.c optional zfs compile-with "${ZFS_C}" 
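# (Note, not part of this commit: ${CDDL_C}, ${ZFS_C}, ${DTRACE_C} and
# ${FBT_C} in this section are make variables defined elsewhere in the
# build (sys/conf/kern.pre.mk in stock trees); each is roughly the normal
# kernel C compile command plus the OpenSolaris compatibility include
# paths, approximately -I$S/cddl/compat/opensolaris and
# -I$S/cddl/contrib/opensolaris/uts/common, so that the CDDL-licensed
# sources find their private headers.)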
cddl/contrib/opensolaris/uts/common/zmod/zmod.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zmod_subr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zutil.c optional zfs compile-with "${ZFS_C}" # dtrace specific cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c optional dtrace compile-with "${DTRACE_C}" \ warning "kernel contains CDDL licensed DTRACE" cddl/dev/dtmalloc/dtmalloc.c optional dtmalloc | dtraceall compile-with "${CDDL_C}" cddl/dev/profile/profile.c optional dtrace_profile | dtraceall compile-with "${CDDL_C}" cddl/dev/sdt/sdt.c optional dtrace_sdt | dtraceall compile-with "${CDDL_C}" cddl/dev/fbt/fbt.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/systrace/systrace.c optional dtrace_systrace | dtraceall compile-with "${CDDL_C}" cddl/dev/prototype.c optional dtrace_prototype | dtraceall compile-with "${CDDL_C}" fs/nfsclient/nfs_clkdtrace.c optional dtnfscl nfscl | dtraceall nfscl compile-with "${CDDL_C}" compat/cloudabi/cloudabi_clock.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_errno.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_fd.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_file.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_futex.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_mem.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_proc.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_random.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_sock.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_thread.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_vdso.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi32/cloudabi32_fd.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_module.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_poll.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_sock.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_syscalls.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_sysent.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_thread.c optional compat_cloudabi32 compat/cloudabi64/cloudabi64_fd.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_module.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_poll.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_sock.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_syscalls.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_sysent.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_thread.c optional compat_cloudabi64 compat/freebsd32/freebsd32_capability.c optional compat_freebsd32 compat/freebsd32/freebsd32_ioctl.c optional compat_freebsd32 compat/freebsd32/freebsd32_misc.c optional compat_freebsd32 compat/freebsd32/freebsd32_syscalls.c optional compat_freebsd32 compat/freebsd32/freebsd32_sysent.c optional compat_freebsd32 contrib/ck/src/ck_array.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_centralized.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_combining.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_dissemination.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_mcs.c standard compile-with "${NORMAL_C} 
-I$S/contrib/ck/include" contrib/ck/src/ck_barrier_tournament.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_epoch.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_hp.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_hs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_ht.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_rhs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/dev/acpica/common/ahids.c optional acpi acpi_debug contrib/dev/acpica/common/ahuuids.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbcmds.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbconvert.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbdisply.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbexec.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbhistry.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbinput.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbmethod.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbnames.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbobject.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbstats.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbtest.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbutils.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbxface.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmbuffer.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmcstyle.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmdeferred.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmnames.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmopcode.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrc.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl2.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcs.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmutils.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmwalk.c optional acpi acpi_debug contrib/dev/acpica/components/dispatcher/dsargs.c optional acpi contrib/dev/acpica/components/dispatcher/dscontrol.c optional acpi contrib/dev/acpica/components/dispatcher/dsdebug.c optional acpi contrib/dev/acpica/components/dispatcher/dsfield.c optional acpi contrib/dev/acpica/components/dispatcher/dsinit.c optional acpi contrib/dev/acpica/components/dispatcher/dsmethod.c optional acpi contrib/dev/acpica/components/dispatcher/dsmthdat.c optional acpi contrib/dev/acpica/components/dispatcher/dsobject.c optional acpi contrib/dev/acpica/components/dispatcher/dsopcode.c optional acpi contrib/dev/acpica/components/dispatcher/dsutils.c optional acpi contrib/dev/acpica/components/dispatcher/dswexec.c optional acpi contrib/dev/acpica/components/dispatcher/dswload.c optional acpi contrib/dev/acpica/components/dispatcher/dswload2.c optional acpi contrib/dev/acpica/components/dispatcher/dswscope.c optional acpi contrib/dev/acpica/components/dispatcher/dswstate.c optional acpi contrib/dev/acpica/components/events/evevent.c optional acpi 
contrib/dev/acpica/components/events/evglock.c optional acpi contrib/dev/acpica/components/events/evgpe.c optional acpi contrib/dev/acpica/components/events/evgpeblk.c optional acpi contrib/dev/acpica/components/events/evgpeinit.c optional acpi contrib/dev/acpica/components/events/evgpeutil.c optional acpi contrib/dev/acpica/components/events/evhandler.c optional acpi contrib/dev/acpica/components/events/evmisc.c optional acpi contrib/dev/acpica/components/events/evregion.c optional acpi contrib/dev/acpica/components/events/evrgnini.c optional acpi contrib/dev/acpica/components/events/evsci.c optional acpi contrib/dev/acpica/components/events/evxface.c optional acpi contrib/dev/acpica/components/events/evxfevnt.c optional acpi contrib/dev/acpica/components/events/evxfgpe.c optional acpi contrib/dev/acpica/components/events/evxfregn.c optional acpi contrib/dev/acpica/components/executer/exconcat.c optional acpi contrib/dev/acpica/components/executer/exconfig.c optional acpi contrib/dev/acpica/components/executer/exconvrt.c optional acpi contrib/dev/acpica/components/executer/excreate.c optional acpi contrib/dev/acpica/components/executer/exdebug.c optional acpi contrib/dev/acpica/components/executer/exdump.c optional acpi contrib/dev/acpica/components/executer/exfield.c optional acpi contrib/dev/acpica/components/executer/exfldio.c optional acpi contrib/dev/acpica/components/executer/exmisc.c optional acpi contrib/dev/acpica/components/executer/exmutex.c optional acpi contrib/dev/acpica/components/executer/exnames.c optional acpi contrib/dev/acpica/components/executer/exoparg1.c optional acpi contrib/dev/acpica/components/executer/exoparg2.c optional acpi contrib/dev/acpica/components/executer/exoparg3.c optional acpi contrib/dev/acpica/components/executer/exoparg6.c optional acpi contrib/dev/acpica/components/executer/exprep.c optional acpi contrib/dev/acpica/components/executer/exregion.c optional acpi contrib/dev/acpica/components/executer/exresnte.c optional acpi contrib/dev/acpica/components/executer/exresolv.c optional acpi contrib/dev/acpica/components/executer/exresop.c optional acpi contrib/dev/acpica/components/executer/exstore.c optional acpi contrib/dev/acpica/components/executer/exstoren.c optional acpi contrib/dev/acpica/components/executer/exstorob.c optional acpi contrib/dev/acpica/components/executer/exsystem.c optional acpi contrib/dev/acpica/components/executer/extrace.c optional acpi contrib/dev/acpica/components/executer/exutils.c optional acpi contrib/dev/acpica/components/hardware/hwacpi.c optional acpi contrib/dev/acpica/components/hardware/hwesleep.c optional acpi contrib/dev/acpica/components/hardware/hwgpe.c optional acpi contrib/dev/acpica/components/hardware/hwpci.c optional acpi contrib/dev/acpica/components/hardware/hwregs.c optional acpi contrib/dev/acpica/components/hardware/hwsleep.c optional acpi contrib/dev/acpica/components/hardware/hwtimer.c optional acpi contrib/dev/acpica/components/hardware/hwvalid.c optional acpi contrib/dev/acpica/components/hardware/hwxface.c optional acpi contrib/dev/acpica/components/hardware/hwxfsleep.c optional acpi contrib/dev/acpica/components/namespace/nsaccess.c optional acpi contrib/dev/acpica/components/namespace/nsalloc.c optional acpi contrib/dev/acpica/components/namespace/nsarguments.c optional acpi contrib/dev/acpica/components/namespace/nsconvert.c optional acpi contrib/dev/acpica/components/namespace/nsdump.c optional acpi contrib/dev/acpica/components/namespace/nseval.c optional acpi 
contrib/dev/acpica/components/namespace/nsinit.c optional acpi contrib/dev/acpica/components/namespace/nsload.c optional acpi contrib/dev/acpica/components/namespace/nsnames.c optional acpi contrib/dev/acpica/components/namespace/nsobject.c optional acpi contrib/dev/acpica/components/namespace/nsparse.c optional acpi contrib/dev/acpica/components/namespace/nspredef.c optional acpi contrib/dev/acpica/components/namespace/nsprepkg.c optional acpi contrib/dev/acpica/components/namespace/nsrepair.c optional acpi contrib/dev/acpica/components/namespace/nsrepair2.c optional acpi contrib/dev/acpica/components/namespace/nssearch.c optional acpi contrib/dev/acpica/components/namespace/nsutils.c optional acpi contrib/dev/acpica/components/namespace/nswalk.c optional acpi contrib/dev/acpica/components/namespace/nsxfeval.c optional acpi contrib/dev/acpica/components/namespace/nsxfname.c optional acpi contrib/dev/acpica/components/namespace/nsxfobj.c optional acpi contrib/dev/acpica/components/parser/psargs.c optional acpi contrib/dev/acpica/components/parser/psloop.c optional acpi contrib/dev/acpica/components/parser/psobject.c optional acpi contrib/dev/acpica/components/parser/psopcode.c optional acpi contrib/dev/acpica/components/parser/psopinfo.c optional acpi contrib/dev/acpica/components/parser/psparse.c optional acpi contrib/dev/acpica/components/parser/psscope.c optional acpi contrib/dev/acpica/components/parser/pstree.c optional acpi contrib/dev/acpica/components/parser/psutils.c optional acpi contrib/dev/acpica/components/parser/pswalk.c optional acpi contrib/dev/acpica/components/parser/psxface.c optional acpi contrib/dev/acpica/components/resources/rsaddr.c optional acpi contrib/dev/acpica/components/resources/rscalc.c optional acpi contrib/dev/acpica/components/resources/rscreate.c optional acpi contrib/dev/acpica/components/resources/rsdump.c optional acpi acpi_debug contrib/dev/acpica/components/resources/rsdumpinfo.c optional acpi contrib/dev/acpica/components/resources/rsinfo.c optional acpi contrib/dev/acpica/components/resources/rsio.c optional acpi contrib/dev/acpica/components/resources/rsirq.c optional acpi contrib/dev/acpica/components/resources/rslist.c optional acpi contrib/dev/acpica/components/resources/rsmemory.c optional acpi contrib/dev/acpica/components/resources/rsmisc.c optional acpi contrib/dev/acpica/components/resources/rsserial.c optional acpi contrib/dev/acpica/components/resources/rsutils.c optional acpi contrib/dev/acpica/components/resources/rsxface.c optional acpi contrib/dev/acpica/components/tables/tbdata.c optional acpi contrib/dev/acpica/components/tables/tbfadt.c optional acpi contrib/dev/acpica/components/tables/tbfind.c optional acpi contrib/dev/acpica/components/tables/tbinstal.c optional acpi contrib/dev/acpica/components/tables/tbprint.c optional acpi contrib/dev/acpica/components/tables/tbutils.c optional acpi contrib/dev/acpica/components/tables/tbxface.c optional acpi contrib/dev/acpica/components/tables/tbxfload.c optional acpi contrib/dev/acpica/components/tables/tbxfroot.c optional acpi contrib/dev/acpica/components/utilities/utaddress.c optional acpi contrib/dev/acpica/components/utilities/utalloc.c optional acpi contrib/dev/acpica/components/utilities/utascii.c optional acpi contrib/dev/acpica/components/utilities/utbuffer.c optional acpi contrib/dev/acpica/components/utilities/utcache.c optional acpi contrib/dev/acpica/components/utilities/utcopy.c optional acpi contrib/dev/acpica/components/utilities/utdebug.c optional acpi 
contrib/dev/acpica/components/utilities/utdecode.c optional acpi contrib/dev/acpica/components/utilities/utdelete.c optional acpi contrib/dev/acpica/components/utilities/uterror.c optional acpi contrib/dev/acpica/components/utilities/uteval.c optional acpi contrib/dev/acpica/components/utilities/utexcep.c optional acpi contrib/dev/acpica/components/utilities/utglobal.c optional acpi contrib/dev/acpica/components/utilities/uthex.c optional acpi contrib/dev/acpica/components/utilities/utids.c optional acpi contrib/dev/acpica/components/utilities/utinit.c optional acpi contrib/dev/acpica/components/utilities/utlock.c optional acpi contrib/dev/acpica/components/utilities/utmath.c optional acpi contrib/dev/acpica/components/utilities/utmisc.c optional acpi contrib/dev/acpica/components/utilities/utmutex.c optional acpi contrib/dev/acpica/components/utilities/utnonansi.c optional acpi contrib/dev/acpica/components/utilities/utobject.c optional acpi contrib/dev/acpica/components/utilities/utosi.c optional acpi contrib/dev/acpica/components/utilities/utownerid.c optional acpi contrib/dev/acpica/components/utilities/utpredef.c optional acpi contrib/dev/acpica/components/utilities/utresrc.c optional acpi contrib/dev/acpica/components/utilities/utstate.c optional acpi contrib/dev/acpica/components/utilities/utstring.c optional acpi contrib/dev/acpica/components/utilities/utstrtoul64.c optional acpi contrib/dev/acpica/components/utilities/utuuid.c optional acpi acpi_debug contrib/dev/acpica/components/utilities/utxface.c optional acpi contrib/dev/acpica/components/utilities/utxferror.c optional acpi contrib/dev/acpica/components/utilities/utxfinit.c optional acpi contrib/dev/acpica/os_specific/service_layers/osgendbg.c optional acpi acpi_debug contrib/ipfilter/netinet/fil.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_auth.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_frag.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_log.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_proxy.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_state.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_lookup.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -Wno-error -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_pool.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_htable.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_sync.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat6.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_rules.c optional 
ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_scan.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_dstlist.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/radix_ipf.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/libfdt/fdt.c optional fdt contrib/libfdt/fdt_ro.c optional fdt contrib/libfdt/fdt_rw.c optional fdt contrib/libfdt/fdt_strerror.c optional fdt contrib/libfdt/fdt_sw.c optional fdt contrib/libfdt/fdt_wip.c optional fdt contrib/libnv/cnvlist.c standard contrib/libnv/dnvlist.c standard contrib/libnv/nvlist.c standard contrib/libnv/nvpair.c standard contrib/ngatm/netnatm/api/cc_conn.c optional ngatm_ccatm \ compile-with "${NORMAL_C_NOWERROR} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_data.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_dump.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_port.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_sig.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_user.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/unisap.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/straddr.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/unimsg_common.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/traffic.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_ie.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_msg.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_call.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_coord.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_party.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_print.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_reset.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" crypto/blowfish/bf_ecb.c optional ipsec crypto/blowfish/bf_skey.c optional crypto | ipsec crypto/camellia/camellia.c optional crypto | ipsec crypto/camellia/camellia-api.c optional crypto | ipsec crypto/des/des_ecb.c optional crypto | ipsec | netsmb crypto/des/des_setkey.c optional crypto | ipsec | netsmb crypto/rc4/rc4.c optional netgraph_mppc_encryption | kgssapi crypto/rijndael/rijndael-alg-fst.c optional crypto | ekcd | 
geom_bde | \ ipsec | random !random_loadable | wlan_ccmp crypto/rijndael/rijndael-api-fst.c optional ekcd | geom_bde | random !random_loadable crypto/rijndael/rijndael-api.c optional crypto | ipsec | wlan_ccmp crypto/sha1.c optional carp | crypto | ipsec | \ netgraph_mppc_encryption | sctp crypto/sha2/sha256c.c optional crypto | ekcd | geom_bde | ipsec | random !random_loadable | \ sctp | zfs crypto/sha2/sha512c.c optional crypto | geom_bde | ipsec | zfs crypto/skein/skein.c optional crypto | zfs crypto/skein/skein_block.c optional crypto | zfs crypto/siphash/siphash.c optional inet | inet6 crypto/siphash/siphash_test.c optional inet | inet6 ddb/db_access.c optional ddb ddb/db_break.c optional ddb ddb/db_capture.c optional ddb ddb/db_command.c optional ddb ddb/db_examine.c optional ddb ddb/db_expr.c optional ddb ddb/db_input.c optional ddb ddb/db_lex.c optional ddb ddb/db_main.c optional ddb ddb/db_output.c optional ddb ddb/db_print.c optional ddb ddb/db_ps.c optional ddb ddb/db_run.c optional ddb ddb/db_script.c optional ddb ddb/db_sym.c optional ddb ddb/db_thread.c optional ddb ddb/db_textdump.c optional ddb ddb/db_variables.c optional ddb ddb/db_watch.c optional ddb ddb/db_write_cmd.c optional ddb dev/aac/aac.c optional aac dev/aac/aac_cam.c optional aacp aac dev/aac/aac_debug.c optional aac dev/aac/aac_disk.c optional aac dev/aac/aac_linux.c optional aac compat_linux dev/aac/aac_pci.c optional aac pci dev/aacraid/aacraid.c optional aacraid dev/aacraid/aacraid_cam.c optional aacraid scbus dev/aacraid/aacraid_debug.c optional aacraid dev/aacraid/aacraid_linux.c optional aacraid compat_linux dev/aacraid/aacraid_pci.c optional aacraid pci dev/acpi_support/acpi_wmi.c optional acpi_wmi acpi dev/acpi_support/acpi_asus.c optional acpi_asus acpi dev/acpi_support/acpi_asus_wmi.c optional acpi_asus_wmi acpi dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi dev/acpi_support/acpi_hp.c optional acpi_hp acpi dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi dev/acpi_support/acpi_sony.c optional acpi_sony acpi dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi dev/acpi_support/atk0110.c optional aibs acpi dev/acpica/Osd/OsdDebug.c optional acpi dev/acpica/Osd/OsdHardware.c optional acpi dev/acpica/Osd/OsdInterrupt.c optional acpi dev/acpica/Osd/OsdMemory.c optional acpi dev/acpica/Osd/OsdSchedule.c optional acpi dev/acpica/Osd/OsdStream.c optional acpi dev/acpica/Osd/OsdSynch.c optional acpi dev/acpica/Osd/OsdTable.c optional acpi dev/acpica/acpi.c optional acpi dev/acpica/acpi_acad.c optional acpi dev/acpica/acpi_battery.c optional acpi dev/acpica/acpi_button.c optional acpi dev/acpica/acpi_cmbat.c optional acpi dev/acpica/acpi_cpu.c optional acpi dev/acpica/acpi_ec.c optional acpi dev/acpica/acpi_isab.c optional acpi isa dev/acpica/acpi_lid.c optional acpi dev/acpica/acpi_package.c optional acpi dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_perf.c optional acpi dev/acpica/acpi_powerres.c optional acpi dev/acpica/acpi_quirk.c optional acpi dev/acpica/acpi_resource.c optional acpi dev/acpica/acpi_smbat.c optional acpi dev/acpica/acpi_thermal.c optional acpi dev/acpica/acpi_throttle.c optional acpi dev/acpica/acpi_video.c optional acpi_video acpi dev/acpica/acpi_dock.c optional acpi_dock acpi dev/adlink/adlink.c optional adlink 
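# (Note, not part of this commit: a few entries below, e.g. the
# al_eth/alpine-hal ones, spell out a full "${CC} -c -o ${.TARGET} ..."
# command instead of using ${NORMAL_C}, so that extra -I paths can be
# injected; ${.TARGET} and ${.IMPSRC} are the standard make(1) local
# variables for the object being built and its implied source.)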
dev/advansys/adv_eisa.c optional adv eisa dev/advansys/adv_pci.c optional adv pci dev/advansys/advansys.c optional adv dev/advansys/advlib.c optional adv dev/advansys/advmcode.c optional adv dev/advansys/adw_pci.c optional adw pci dev/advansys/adwcam.c optional adw dev/advansys/adwlib.c optional adw dev/advansys/adwmcode.c optional adw dev/ae/if_ae.c optional ae pci dev/age/if_age.c optional age pci dev/agp/agp.c optional agp pci dev/agp/agp_if.m optional agp pci dev/aha/aha.c optional aha dev/aha/aha_isa.c optional aha isa dev/aha/aha_mca.c optional aha mca dev/ahb/ahb.c optional ahb eisa dev/ahci/ahci.c optional ahci dev/ahci/ahciem.c optional ahci dev/ahci/ahci_pci.c optional ahci pci dev/aic/aic.c optional aic dev/aic/aic_pccard.c optional aic pccard dev/aic7xxx/ahc_eisa.c optional ahc eisa dev/aic7xxx/ahc_isa.c optional ahc isa dev/aic7xxx/ahc_pci.c optional ahc pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/ahd_pci.c optional ahd pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/aic7770.c optional ahc dev/aic7xxx/aic79xx.c optional ahd pci dev/aic7xxx/aic79xx_osm.c optional ahd pci dev/aic7xxx/aic79xx_pci.c optional ahd pci dev/aic7xxx/aic79xx_reg_print.c optional ahd pci ahd_reg_pretty_print dev/aic7xxx/aic7xxx.c optional ahc dev/aic7xxx/aic7xxx_93cx6.c optional ahc dev/aic7xxx/aic7xxx_osm.c optional ahc dev/aic7xxx/aic7xxx_pci.c optional ahc pci dev/aic7xxx/aic7xxx_reg_print.c optional ahc ahc_reg_pretty_print dev/al_eth/al_eth.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/al_eth/al_init_eth_lm.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/al_eth/al_init_eth_kr.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_iofic.c optional al_iofic \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_25g.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_hssp.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_config.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_debug.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_iofic.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_main.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_serdes.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" 
contrib/alpine-hal/eth/al_hal_eth_kr.c optional al_eth \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/eth/al_hal_eth_main.c optional al_eth \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/alc/if_alc.c optional alc pci dev/ale/if_ale.c optional ale pci dev/alpm/alpm.c optional alpm pci dev/altera/avgen/altera_avgen.c optional altera_avgen dev/altera/avgen/altera_avgen_fdt.c optional altera_avgen fdt dev/altera/avgen/altera_avgen_nexus.c optional altera_avgen dev/altera/sdcard/altera_sdcard.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_disk.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_io.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_fdt.c optional altera_sdcard fdt dev/altera/sdcard/altera_sdcard_nexus.c optional altera_sdcard dev/altera/pio/pio.c optional altera_pio dev/altera/pio/pio_if.m optional altera_pio dev/amdpm/amdpm.c optional amdpm pci | nfpm pci dev/amdsmb/amdsmb.c optional amdsmb pci dev/amr/amr.c optional amr dev/amr/amr_cam.c optional amrp amr dev/amr/amr_disk.c optional amr dev/amr/amr_linux.c optional amr compat_linux dev/amr/amr_pci.c optional amr pci dev/an/if_an.c optional an dev/an/if_an_isa.c optional an isa dev/an/if_an_pccard.c optional an pccard dev/an/if_an_pci.c optional an pci # dev/ata/ata_if.m optional ata | atacore dev/ata/ata-all.c optional ata | atacore dev/ata/ata-dma.c optional ata | atacore dev/ata/ata-lowlevel.c optional ata | atacore dev/ata/ata-sata.c optional ata | atacore dev/ata/ata-card.c optional ata pccard | atapccard dev/ata/ata-cbus.c optional ata pc98 | atapc98 dev/ata/ata-isa.c optional ata isa | ataisa dev/ata/ata-pci.c optional ata pci | atapci dev/ata/chipsets/ata-acard.c optional ata pci | ataacard dev/ata/chipsets/ata-acerlabs.c optional ata pci | ataacerlabs dev/ata/chipsets/ata-amd.c optional ata pci | ataamd dev/ata/chipsets/ata-ati.c optional ata pci | ataati dev/ata/chipsets/ata-cenatek.c optional ata pci | atacenatek dev/ata/chipsets/ata-cypress.c optional ata pci | atacypress dev/ata/chipsets/ata-cyrix.c optional ata pci | atacyrix dev/ata/chipsets/ata-highpoint.c optional ata pci | atahighpoint dev/ata/chipsets/ata-intel.c optional ata pci | ataintel dev/ata/chipsets/ata-ite.c optional ata pci | ataite dev/ata/chipsets/ata-jmicron.c optional ata pci | atajmicron dev/ata/chipsets/ata-marvell.c optional ata pci | atamarvell dev/ata/chipsets/ata-micron.c optional ata pci | atamicron dev/ata/chipsets/ata-national.c optional ata pci | atanational dev/ata/chipsets/ata-netcell.c optional ata pci | atanetcell dev/ata/chipsets/ata-nvidia.c optional ata pci | atanvidia dev/ata/chipsets/ata-promise.c optional ata pci | atapromise dev/ata/chipsets/ata-serverworks.c optional ata pci | ataserverworks dev/ata/chipsets/ata-siliconimage.c optional ata pci | atasiliconimage | ataati dev/ata/chipsets/ata-sis.c optional ata pci | atasis dev/ata/chipsets/ata-via.c optional ata pci | atavia # dev/ath/if_ath_pci.c optional ath_pci pci \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath_ahb.c optional ath_ahb \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_alq.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_beacon.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_btcoex.c optional ath \ 
compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_btcoex_mci.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_debug.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_descdma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_keycache.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_ioctl.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_led.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_lna_div.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx_edma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx_ht.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tdma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_sysctl.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_rx.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_rx_edma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_spectral.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ah_osdep.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/ath_hal/ah.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v1.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v3.c optional ath_hal | ath_ar5211 | ath_ar5212 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v14.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v4k.c \ optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_9287.c \ optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_regdomain.c optional ath \ compile-with "${NORMAL_C} ${NO_WSHIFT_COUNT_NEGATIVE} ${NO_WSHIFT_COUNT_OVERFLOW} -I$S/dev/ath" # ar5210 dev/ath/ath_hal/ar5210/ar5210_attach.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_beacon.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_interrupts.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_keycache.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_misc.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_phy.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_power.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_recv.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_reset.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_xmit.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5211 dev/ath/ath_hal/ar5211/ar5211_attach.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" 
dev/ath/ath_hal/ar5211/ar5211_beacon.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_interrupts.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_keycache.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_misc.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_phy.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_power.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_recv.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_reset.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_xmit.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5212 dev/ath/ath_hal/ar5212/ar5212_ani.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_attach.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_beacon.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_eeprom.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_gpio.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_interrupts.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_keycache.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_misc.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_phy.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_power.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_recv.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_reset.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath 
-I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_rfgain.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_xmit.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5416 (depends on ar5212) dev/ath/ath_hal/ar5416/ar5416_ani.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_attach.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_beacon.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_btcoex.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_iq.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcgain.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcdc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_eeprom.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_gpio.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_interrupts.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_keycache.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_misc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_phy.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_power.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_radar.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_recv.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | 
ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_reset.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_spectral.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_xmit.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9130 (depends upon ar5416) - also requires AH_SUPPORT_AR9130 # # Since this is an embedded MAC SoC, there's no need to compile it into the # default HAL. dev/ath/ath_hal/ar9001/ar9130_attach.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_phy.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_eeprom.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9160 (depends on ar5416) dev/ath/ath_hal/ar9001/ar9160_attach.c optional ath_hal | ath_ar9160 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9280 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9280_attach.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280_olc.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9285 (depends on ar5416 and ar9280) dev/ath/ath_hal/ar9002/ar9285_attach.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_btcoex.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_reset.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_cal.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_phy.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_diversity.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9287 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9287_attach.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_reset.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_cal.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_olc.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9300 contrib/dev/ath/ath_hal/ar9300/ar9300_ani.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_attach.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_beacon.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" 
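#
# Note how the ath_hal entries are conditioned: "optional ath_hal |
# ath_arXXXX" builds a file either as part of the full HAL (the
# ath_hal option) or when only that chip's option is configured.  The
# "depends on" comments are enforced the same way; the ar5212 sources
# above, for instance, also list ath_ar5416 and the ar9xxx options as
# alternatives, so configuring a newer chip still pulls in the ar5212
# code it reuses.  Schematically, with a placeholder chip name:
#
#	dev/ath/ath_hal/arXXXX/arXXXX_foo.c optional ath_hal | ath_arXXXX \
#		compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal"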
contrib/dev/ath/ath_hal/ar9300/ar9300_eeprom.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WCONSTANT_CONVERSION}" contrib/dev/ath/ath_hal/ar9300/ar9300_freebsd.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_gpio.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_interrupts.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_keycache.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_mci.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_misc.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_paprd.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_phy.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_power.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radar.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radio.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv_ds.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_reset.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WSOMETIMES_UNINITIALIZED} -Wno-unused-function" contrib/dev/ath/ath_hal/ar9300/ar9300_stub.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_stub_funcs.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_spectral.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_timer.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit_ds.c optional ath_hal | ath_ar9300 \ compile-with 
"${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" # rf backends dev/ath/ath_hal/ar5212/ar2316.c optional ath_rf2316 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2317.c optional ath_rf2317 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2413.c optional ath_hal | ath_rf2413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2425.c optional ath_hal | ath_rf2425 | ath_rf2417 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5111.c optional ath_hal | ath_rf5111 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5112.c optional ath_hal | ath_rf5112 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5413.c optional ath_hal | ath_rf5413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar2133.c optional ath_hal | ath_ar5416 | \ ath_ar9130 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280.c optional ath_hal | ath_ar9280 | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ath rate control algorithms dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \ compile-with "${NORMAL_C} -I$S/dev/ath" # ath DFS modules dev/ath/ath_dfs/null/dfs_null.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/bce/if_bce.c optional bce dev/bfe/if_bfe.c optional bfe dev/bge/if_bge.c optional bge dev/bhnd/bhnd.c optional bhnd dev/bhnd/bhnd_erom.c optional bhnd dev/bhnd/bhnd_erom_if.m optional bhnd dev/bhnd/bhnd_nexus.c optional bhnd siba_nexus | \ bhnd bcma_nexus dev/bhnd/bhnd_subr.c optional bhnd dev/bhnd/bhnd_bus_if.m optional bhnd dev/bhnd/bhndb/bhnd_bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_bus_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_hwdata.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_pci.c optional bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_hwdata.c optional bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_sprom.c optional bhndb bhnd pci dev/bhnd/bhndb/bhndb_subr.c optional bhndb bhnd dev/bhnd/bcma/bcma.c optional bcma bhnd dev/bhnd/bcma/bcma_bhndb.c optional bcma bhnd bhndb dev/bhnd/bcma/bcma_erom.c optional bcma bhnd dev/bhnd/bcma/bcma_nexus.c optional bcma_nexus bcma bhnd dev/bhnd/bcma/bcma_subr.c optional bcma bhnd dev/bhnd/cores/chipc/bhnd_chipc_if.m optional bhnd dev/bhnd/cores/chipc/bhnd_sprom_chipc.c optional bhnd dev/bhnd/cores/chipc/bhnd_pmu_chipc.c optional bhnd dev/bhnd/cores/chipc/chipc.c optional bhnd dev/bhnd/cores/chipc/chipc_cfi.c optional bhnd cfi dev/bhnd/cores/chipc/chipc_slicer.c optional bhnd cfi | bhnd spibus dev/bhnd/cores/chipc/chipc_spi.c optional bhnd spibus dev/bhnd/cores/chipc/chipc_subr.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_subr.c optional bhnd dev/bhnd/cores/pci/bhnd_pci.c optional bhnd pci 
dev/bhnd/cores/pci/bhnd_pci_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pci/bhnd_pcib.c optional bhnd_pcib bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2.c optional bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2b.c optional bhnd_pcie2b bhnd pci dev/bhnd/cores/pmu/bhnd_pmu.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_core.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_if.m optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcm.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcmraw.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_btxt.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_tlv.c optional bhnd dev/bhnd/nvram/bhnd_nvram_if.m optional bhnd dev/bhnd/nvram/bhnd_nvram_io.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iobuf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_ioptr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iores.c optional bhnd dev/bhnd/nvram/bhnd_nvram_plist.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_fmts.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_prf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_subr.c optional bhnd dev/bhnd/nvram/bhnd_sprom.c optional bhnd dev/bhnd/siba/siba.c optional siba bhnd dev/bhnd/siba/siba_bhndb.c optional siba bhnd bhndb dev/bhnd/siba/siba_erom.c optional siba bhnd dev/bhnd/siba/siba_nexus.c optional siba_nexus siba bhnd dev/bhnd/siba/siba_subr.c optional siba bhnd # dev/bktr/bktr_audio.c optional bktr pci dev/bktr/bktr_card.c optional bktr pci dev/bktr/bktr_core.c optional bktr pci dev/bktr/bktr_i2c.c optional bktr pci smbus dev/bktr/bktr_os.c optional bktr pci dev/bktr/bktr_tuner.c optional bktr pci dev/bktr/msp34xx.c optional bktr pci dev/bnxt/bnxt_hwrm.c optional bnxt iflib pci dev/bnxt/bnxt_sysctl.c optional bnxt iflib pci dev/bnxt/bnxt_txrx.c optional bnxt iflib pci dev/bnxt/if_bnxt.c optional bnxt iflib pci dev/buslogic/bt.c optional bt dev/buslogic/bt_eisa.c optional bt eisa dev/buslogic/bt_isa.c optional bt isa dev/buslogic/bt_mca.c optional bt mca dev/buslogic/bt_pci.c optional bt pci dev/bwi/bwimac.c optional bwi dev/bwi/bwiphy.c optional bwi dev/bwi/bwirf.c optional bwi dev/bwi/if_bwi.c optional bwi dev/bwi/if_bwi_pci.c optional bwi pci # XXX Work around clang warnings, until maintainer approves fix. 
dev/bwn/if_bwn.c optional bwn siba_bwn \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/bwn/if_bwn_pci.c optional bwn pci bhnd dev/bwn/if_bwn_phy_common.c optional bwn siba_bwn dev/bwn/if_bwn_phy_g.c optional bwn siba_bwn \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED} ${NO_WCONSTANT_CONVERSION}" dev/bwn/if_bwn_phy_lp.c optional bwn siba_bwn \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/bwn/if_bwn_phy_n.c optional bwn siba_bwn dev/bwn/if_bwn_util.c optional bwn siba_bwn dev/bwn/bwn_mac.c optional bwn bhnd dev/cardbus/cardbus.c optional cardbus dev/cardbus/cardbus_cis.c optional cardbus dev/cardbus/cardbus_device.c optional cardbus dev/cas/if_cas.c optional cas dev/cfi/cfi_bus_fdt.c optional cfi fdt dev/cfi/cfi_bus_nexus.c optional cfi dev/cfi/cfi_core.c optional cfi dev/cfi/cfi_dev.c optional cfi dev/cfi/cfi_disk.c optional cfid dev/chromebook_platform/chromebook_platform.c optional chromebook_platform dev/ciss/ciss.c optional ciss dev/cm/smc90cx6.c optional cm dev/cmx/cmx.c optional cmx dev/cmx/cmx_pccard.c optional cmx pccard dev/cpufreq/ichss.c optional cpufreq pci dev/cs/if_cs.c optional cs dev/cs/if_cs_isa.c optional cs isa dev/cs/if_cs_pccard.c optional cs pccard dev/cxgb/cxgb_main.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_sge.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_mc5.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc7323.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc8211.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_ael1002.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_aq100x.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_xgmac.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_tn1010.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/sys/uipc_mvec.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgbe/t4_if.m optional cxgbe pci dev/cxgbe/t4_iov.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_mp_ring.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_main.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_netmap.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_sge.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_l2t.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_tracer.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_vf.c optional cxgbev pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/common/t4_hw.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" t4fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t4fw_cfg.fw:t4fw_cfg t4fw_cfg_uwire.fw:t4fw_cfg_uwire t4fw.fw:t4fw -mt4fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t4fw_cfg.c" t4fw_cfg.fwo optional cxgbe \ dependency "t4fw_cfg.fw" \ 
compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg.fwo" t4fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg.fw" t4fw_cfg_uwire.fwo optional cxgbe \ dependency "t4fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg_uwire.fwo" t4fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg_uwire.fw" t4fw.fwo optional cxgbe \ dependency "t4fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw.fwo" t4fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw-1.16.26.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t4fw.fw" t5fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t5fw_cfg.fw:t5fw_cfg t5fw.fw:t5fw -mt5fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t5fw_cfg.c" t5fw_cfg.fwo optional cxgbe \ dependency "t5fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw_cfg.fwo" t5fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw_cfg.fw" t5fw.fwo optional cxgbe \ dependency "t5fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw.fwo" t5fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw-1.16.26.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t5fw.fw" t6fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t6fw_cfg.fw:t6fw_cfg t6fw.fw:t6fw -mt6fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t6fw_cfg.c" t6fw_cfg.fwo optional cxgbe \ dependency "t6fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw_cfg.fwo" t6fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw_cfg.fw" t6fw.fwo optional cxgbe \ dependency "t6fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw.fwo" t6fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw-1.16.26.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t6fw.fw" dev/cy/cy.c optional cy dev/cy/cy_isa.c optional cy isa dev/cy/cy_pci.c optional cy pci dev/cyapa/cyapa.c optional cyapa iicbus dev/dc/if_dc.c optional dc pci dev/dc/dcphy.c optional dc pci dev/dc/pnphy.c optional dc pci dev/dcons/dcons.c optional dcons dev/dcons/dcons_crom.c optional dcons_crom dev/dcons/dcons_os.c optional dcons dev/de/if_de.c optional de pci dev/dme/if_dme.c optional dme dev/dpt/dpt_eisa.c optional dpt eisa dev/dpt/dpt_pci.c optional dpt pci dev/dpt/dpt_scsi.c optional dpt dev/drm/ati_pcigart.c optional drm dev/drm/drm_agpsupport.c optional drm dev/drm/drm_auth.c optional drm dev/drm/drm_bufs.c optional drm dev/drm/drm_context.c optional drm dev/drm/drm_dma.c optional drm dev/drm/drm_drawable.c optional drm dev/drm/drm_drv.c optional drm dev/drm/drm_fops.c optional drm dev/drm/drm_hashtab.c optional drm dev/drm/drm_ioctl.c optional drm dev/drm/drm_irq.c optional drm dev/drm/drm_lock.c optional drm dev/drm/drm_memory.c optional drm dev/drm/drm_mm.c optional drm dev/drm/drm_pci.c optional drm dev/drm/drm_scatter.c optional drm dev/drm/drm_sman.c optional drm dev/drm/drm_sysctl.c optional drm dev/drm/drm_vm.c optional drm dev/drm/i915_dma.c 
optional i915drm dev/drm/i915_drv.c optional i915drm dev/drm/i915_irq.c optional i915drm dev/drm/i915_mem.c optional i915drm dev/drm/i915_suspend.c optional i915drm dev/drm/mach64_dma.c optional mach64drm dev/drm/mach64_drv.c optional mach64drm dev/drm/mach64_irq.c optional mach64drm dev/drm/mach64_state.c optional mach64drm dev/drm/mga_dma.c optional mgadrm dev/drm/mga_drv.c optional mgadrm dev/drm/mga_irq.c optional mgadrm dev/drm/mga_state.c optional mgadrm dev/drm/mga_warp.c optional mgadrm dev/drm/r128_cce.c optional r128drm \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/drm/r128_drv.c optional r128drm dev/drm/r128_irq.c optional r128drm dev/drm/r128_state.c optional r128drm dev/drm/r300_cmdbuf.c optional radeondrm dev/drm/r600_blit.c optional radeondrm dev/drm/r600_cp.c optional radeondrm \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/drm/radeon_cp.c optional radeondrm \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/drm/radeon_cs.c optional radeondrm dev/drm/radeon_drv.c optional radeondrm dev/drm/radeon_irq.c optional radeondrm dev/drm/radeon_mem.c optional radeondrm dev/drm/radeon_state.c optional radeondrm dev/drm/savage_bci.c optional savagedrm dev/drm/savage_drv.c optional savagedrm dev/drm/savage_state.c optional savagedrm dev/drm/sis_drv.c optional sisdrm dev/drm/sis_ds.c optional sisdrm dev/drm/sis_mm.c optional sisdrm dev/drm/tdfx_drv.c optional tdfxdrm dev/drm/via_dma.c optional viadrm dev/drm/via_dmablit.c optional viadrm dev/drm/via_drv.c optional viadrm dev/drm/via_irq.c optional viadrm dev/drm/via_map.c optional viadrm dev/drm/via_mm.c optional viadrm dev/drm/via_verifier.c optional viadrm dev/drm/via_video.c optional viadrm dev/drm2/drm_agpsupport.c optional drm2 dev/drm2/drm_auth.c optional drm2 dev/drm2/drm_bufs.c optional drm2 dev/drm2/drm_buffer.c optional drm2 dev/drm2/drm_context.c optional drm2 dev/drm2/drm_crtc.c optional drm2 dev/drm2/drm_crtc_helper.c optional drm2 dev/drm2/drm_dma.c optional drm2 dev/drm2/drm_dp_helper.c optional drm2 dev/drm2/drm_dp_iic_helper.c optional drm2 dev/drm2/drm_drv.c optional drm2 dev/drm2/drm_edid.c optional drm2 dev/drm2/drm_fb_helper.c optional drm2 dev/drm2/drm_fops.c optional drm2 dev/drm2/drm_gem.c optional drm2 dev/drm2/drm_gem_names.c optional drm2 dev/drm2/drm_global.c optional drm2 dev/drm2/drm_hashtab.c optional drm2 dev/drm2/drm_ioctl.c optional drm2 dev/drm2/drm_irq.c optional drm2 dev/drm2/drm_linux_list_sort.c optional drm2 dev/drm2/drm_lock.c optional drm2 dev/drm2/drm_memory.c optional drm2 dev/drm2/drm_mm.c optional drm2 dev/drm2/drm_modes.c optional drm2 dev/drm2/drm_pci.c optional drm2 dev/drm2/drm_platform.c optional drm2 dev/drm2/drm_scatter.c optional drm2 dev/drm2/drm_stub.c optional drm2 dev/drm2/drm_sysctl.c optional drm2 dev/drm2/drm_vm.c optional drm2 dev/drm2/drm_os_freebsd.c optional drm2 dev/drm2/ttm/ttm_agp_backend.c optional drm2 dev/drm2/ttm/ttm_lock.c optional drm2 dev/drm2/ttm/ttm_object.c optional drm2 dev/drm2/ttm/ttm_tt.c optional drm2 dev/drm2/ttm/ttm_bo_util.c optional drm2 dev/drm2/ttm/ttm_bo.c optional drm2 dev/drm2/ttm/ttm_bo_manager.c optional drm2 dev/drm2/ttm/ttm_execbuf_util.c optional drm2 dev/drm2/ttm/ttm_memory.c optional drm2 dev/drm2/ttm/ttm_page_alloc.c optional drm2 dev/drm2/ttm/ttm_bo_vm.c optional drm2 dev/drm2/ati_pcigart.c optional drm2 agp pci dev/ed/if_ed.c optional ed dev/ed/if_ed_novell.c optional ed dev/ed/if_ed_rtl80x9.c optional ed dev/ed/if_ed_pccard.c optional ed pccard dev/ed/if_ed_pci.c optional ed pci dev/efidev/efidev.c 
optional efirt dev/eisa/eisa_if.m standard dev/eisa/eisaconf.c optional eisa dev/e1000/if_em.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/em_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/igb_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_80003es2lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82540.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82541.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82542.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82543.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82571.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82575.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_ich8lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_i210.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_api.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mac.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_manage.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_nvm.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_phy.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_vf.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mbx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_osdep.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/et/if_et.c optional et dev/en/if_en_pci.c optional en pci dev/en/midway.c optional en dev/ep/if_ep.c optional ep dev/ep/if_ep_eisa.c optional ep eisa dev/ep/if_ep_isa.c optional ep isa dev/ep/if_ep_mca.c optional ep mca dev/ep/if_ep_pccard.c optional ep pccard dev/esp/esp_pci.c optional esp pci dev/esp/ncr53c9x.c optional esp dev/etherswitch/arswitch/arswitch.c optional arswitch dev/etherswitch/arswitch/arswitch_reg.c optional arswitch dev/etherswitch/arswitch/arswitch_phy.c optional arswitch dev/etherswitch/arswitch/arswitch_8216.c optional arswitch dev/etherswitch/arswitch/arswitch_8226.c optional arswitch dev/etherswitch/arswitch/arswitch_8316.c optional arswitch dev/etherswitch/arswitch/arswitch_8327.c optional arswitch dev/etherswitch/arswitch/arswitch_7240.c optional arswitch dev/etherswitch/arswitch/arswitch_9340.c optional arswitch dev/etherswitch/arswitch/arswitch_vlans.c optional arswitch dev/etherswitch/etherswitch.c optional etherswitch dev/etherswitch/etherswitch_if.m optional etherswitch dev/etherswitch/ip17x/ip17x.c optional ip17x dev/etherswitch/ip17x/ip175c.c optional ip17x dev/etherswitch/ip17x/ip175d.c optional ip17x dev/etherswitch/ip17x/ip17x_phy.c optional ip17x dev/etherswitch/ip17x/ip17x_vlans.c optional ip17x dev/etherswitch/miiproxy.c optional miiproxy dev/etherswitch/rtl8366/rtl8366rb.c optional rtl8366rb dev/etherswitch/e6000sw/e6000sw.c optional e6000sw dev/etherswitch/ukswitch/ukswitch.c optional ukswitch dev/evdev/cdev.c optional evdev dev/evdev/evdev.c optional evdev dev/evdev/evdev_mt.c optional evdev dev/evdev/evdev_utils.c optional evdev dev/evdev/uinput.c optional evdev uinput dev/ex/if_ex.c optional ex dev/ex/if_ex_isa.c optional ex isa dev/ex/if_ex_pccard.c optional ex pccard dev/exca/exca.c optional cbb dev/extres/clk/clk.c optional ext_resources clk fdt dev/extres/clk/clkdev_if.m optional ext_resources clk fdt 
dev/extres/clk/clknode_if.m optional ext_resources clk fdt dev/extres/clk/clk_bus.c optional ext_resources clk fdt dev/extres/clk/clk_div.c optional ext_resources clk fdt dev/extres/clk/clk_fixed.c optional ext_resources clk fdt dev/extres/clk/clk_gate.c optional ext_resources clk fdt dev/extres/clk/clk_mux.c optional ext_resources clk fdt dev/extres/phy/phy.c optional ext_resources phy fdt dev/extres/phy/phy_if.m optional ext_resources phy fdt dev/extres/hwreset/hwreset.c optional ext_resources hwreset fdt dev/extres/hwreset/hwreset_if.m optional ext_resources hwreset fdt dev/extres/regulator/regdev_if.m optional ext_resources regulator fdt dev/extres/regulator/regnode_if.m optional ext_resources regulator fdt dev/extres/regulator/regulator.c optional ext_resources regulator fdt dev/extres/regulator/regulator_bus.c optional ext_resources regulator fdt dev/extres/regulator/regulator_fixed.c optional ext_resources regulator fdt dev/fatm/if_fatm.c optional fatm pci dev/fb/fbd.c optional fbd | vt dev/fb/fb_if.m standard dev/fb/splash.c optional sc splash dev/fdt/fdt_clock.c optional fdt fdt_clock dev/fdt/fdt_clock_if.m optional fdt fdt_clock dev/fdt/fdt_common.c optional fdt dev/fdt/fdt_pinctrl.c optional fdt fdt_pinctrl dev/fdt/fdt_pinctrl_if.m optional fdt fdt_pinctrl dev/fdt/fdt_slicer.c optional fdt cfi | fdt nand | fdt mx25l dev/fdt/fdt_static_dtb.S optional fdt fdt_dtb_static \ dependency "fdt_dtb_file" dev/fdt/simplebus.c optional fdt dev/fe/if_fe.c optional fe dev/fe/if_fe_pccard.c optional fe pccard dev/filemon/filemon.c optional filemon dev/firewire/firewire.c optional firewire dev/firewire/fwcrom.c optional firewire dev/firewire/fwdev.c optional firewire dev/firewire/fwdma.c optional firewire dev/firewire/fwmem.c optional firewire dev/firewire/fwohci.c optional firewire dev/firewire/fwohci_pci.c optional firewire pci dev/firewire/if_fwe.c optional fwe dev/firewire/if_fwip.c optional fwip dev/firewire/sbp.c optional sbp dev/firewire/sbp_targ.c optional sbp_targ dev/flash/at45d.c optional at45d dev/flash/mx25l.c optional mx25l dev/fxp/if_fxp.c optional fxp dev/fxp/inphy.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci dev/gem/if_gem_sbus.c optional gem sbus dev/gpio/gpiobacklight.c optional gpiobacklight fdt dev/gpio/gpiokeys.c optional gpiokeys fdt dev/gpio/gpiokeys_codes.c optional gpiokeys fdt dev/gpio/gpiobus.c optional gpio \ dependency "gpiobus_if.h" dev/gpio/gpioc.c optional gpio \ dependency "gpio_if.h" dev/gpio/gpioiic.c optional gpioiic dev/gpio/gpioled.c optional gpioled !fdt dev/gpio/gpioled_fdt.c optional gpioled fdt dev/gpio/gpiopower.c optional gpiopower fdt dev/gpio/gpioregulator.c optional gpioregulator fdt ext_resources dev/gpio/gpiospi.c optional gpiospi dev/gpio/gpioths.c optional gpioths dev/gpio/gpio_if.m optional gpio dev/gpio/gpiobus_if.m optional gpio dev/gpio/gpiopps.c optional gpiopps dev/gpio/ofw_gpiobus.c optional fdt gpio dev/hatm/if_hatm.c optional hatm pci dev/hatm/if_hatm_intr.c optional hatm pci dev/hatm/if_hatm_ioctl.c optional hatm pci dev/hatm/if_hatm_rx.c optional hatm pci dev/hatm/if_hatm_tx.c optional hatm pci dev/hifn/hifn7751.c optional hifn dev/hme/if_hme.c optional hme dev/hme/if_hme_pci.c optional hme pci dev/hme/if_hme_sbus.c optional hme sbus dev/hptiop/hptiop.c optional hptiop scbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/hwpmc/hwpmc_soft.c optional hwpmc dev/ichiic/ig4_acpi.c optional ig4 acpi iicbus dev/ichiic/ig4_iic.c optional ig4 iicbus dev/ichiic/ig4_pci.c 
optional ig4 pci iicbus dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida dev/ida/ida_disk.c optional ida dev/ida/ida_eisa.c optional ida eisa dev/ida/ida_pci.c optional ida pci dev/iicbus/ad7418.c optional ad7418 dev/iicbus/ds1307.c optional ds1307 dev/iicbus/ds133x.c optional ds133x dev/iicbus/ds1374.c optional ds1374 dev/iicbus/ds1672.c optional ds1672 dev/iicbus/ds3231.c optional ds3231 dev/iicbus/icee.c optional icee dev/iicbus/if_ic.c optional ic dev/iicbus/iic.c optional iic dev/iicbus/iicbb.c optional iicbb dev/iicbus/iicbb_if.m optional iicbb dev/iicbus/iicbus.c optional iicbus dev/iicbus/iicbus_if.m optional iicbus dev/iicbus/iiconf.c optional iicbus dev/iicbus/iicsmb.c optional iicsmb \ dependency "iicbus_if.h" dev/iicbus/iicoc.c optional iicoc dev/iicbus/lm75.c optional lm75 dev/iicbus/ofw_iicbus.c optional fdt iicbus dev/iicbus/pcf8563.c optional pcf8563 dev/iicbus/s35390a.c optional s35390a dev/iir/iir.c optional iir dev/iir/iir_ctrl.c optional iir dev/iir/iir_pci.c optional iir pci dev/intpm/intpm.c optional intpm pci # XXX Work around clang warning, until maintainer approves fix. dev/ips/ips.c optional ips \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/ips/ips_commands.c optional ips dev/ips/ips_disk.c optional ips dev/ips/ips_ioctl.c optional ips dev/ips/ips_pci.c optional ips pci dev/ipw/if_ipw.c optional ipw ipwbssfw.c optional ipwbssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_bss.fw:ipw_bss:130 -lintel_ipw -mipw_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwbssfw.c" ipw_bss.fwo optional ipwbssfw | ipwfw \ dependency "ipw_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_bss.fwo" ipw_bss.fw optional ipwbssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_bss.fw" ipwibssfw.c optional ipwibssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_ibss.fw:ipw_ibss:130 -lintel_ipw -mipw_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwibssfw.c" ipw_ibss.fwo optional ipwibssfw | ipwfw \ dependency "ipw_ibss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_ibss.fwo" ipw_ibss.fw optional ipwibssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_ibss.fw" ipwmonitorfw.c optional ipwmonitorfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_monitor.fw:ipw_monitor:130 -lintel_ipw -mipw_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwmonitorfw.c" ipw_monitor.fwo optional ipwmonitorfw | ipwfw \ dependency "ipw_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_monitor.fwo" ipw_monitor.fw optional ipwmonitorfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_monitor.fw" dev/iscsi/icl.c optional iscsi | ctl dev/iscsi/icl_conn_if.m optional iscsi | ctl dev/iscsi/icl_soft.c optional iscsi | ctl dev/iscsi/icl_soft_proxy.c optional iscsi | ctl dev/iscsi/iscsi.c optional iscsi scbus dev/iscsi_initiator/iscsi.c optional iscsi_initiator scbus dev/iscsi_initiator/iscsi_subr.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_cam.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_soc.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_sm.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_subr.c 
optional iscsi_initiator scbus dev/ismt/ismt.c optional ismt dev/isl/isl.c optional isl iicbus dev/isp/isp.c optional isp dev/isp/isp_freebsd.c optional isp dev/isp/isp_library.c optional isp dev/isp/isp_pci.c optional isp pci dev/isp/isp_sbus.c optional isp sbus dev/isp/isp_target.c optional isp dev/ispfw/ispfw.c optional ispfw dev/iwi/if_iwi.c optional iwi iwibssfw.c optional iwibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_bss.fw:iwi_bss:300 -lintel_iwi -miwi_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwibssfw.c" iwi_bss.fwo optional iwibssfw | iwifw \ dependency "iwi_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_bss.fwo" iwi_bss.fw optional iwibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-bss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_bss.fw" iwiibssfw.c optional iwiibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_ibss.fw:iwi_ibss:300 -lintel_iwi -miwi_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwiibssfw.c" iwi_ibss.fwo optional iwiibssfw | iwifw \ dependency "iwi_ibss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_ibss.fwo" iwi_ibss.fw optional iwiibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_ibss.fw" iwimonitorfw.c optional iwimonitorfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_monitor.fw:iwi_monitor:300 -lintel_iwi -miwi_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwimonitorfw.c" iwi_monitor.fwo optional iwimonitorfw | iwifw \ dependency "iwi_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_monitor.fwo" iwi_monitor.fw optional iwimonitorfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_monitor.fw" dev/iwm/if_iwm.c optional iwm dev/iwm/if_iwm_binding.c optional iwm dev/iwm/if_iwm_led.c optional iwm dev/iwm/if_iwm_mac_ctxt.c optional iwm dev/iwm/if_iwm_pcie_trans.c optional iwm dev/iwm/if_iwm_phy_ctxt.c optional iwm dev/iwm/if_iwm_phy_db.c optional iwm dev/iwm/if_iwm_power.c optional iwm dev/iwm/if_iwm_scan.c optional iwm dev/iwm/if_iwm_time_event.c optional iwm dev/iwm/if_iwm_util.c optional iwm iwm3160fw.c optional iwm3160fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3160.fw:iwm3160fw -miwm3160fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm3160fw.c" iwm3160fw.fwo optional iwm3160fw | iwmfw \ dependency "iwm3160.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm3160fw.fwo" iwm3160.fw optional iwm3160fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-3160-16.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm3160.fw" iwm7260fw.c optional iwm7260fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7260.fw:iwm7260fw -miwm7260fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7260fw.c" iwm7260fw.fwo optional iwm7260fw | iwmfw \ dependency "iwm7260.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7260fw.fwo" iwm7260.fw optional iwm7260fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7260-16.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7260.fw" iwm7265fw.c optional iwm7265fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265.fw:iwm7265fw -miwm7265fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7265fw.c" 
iwm7265fw.fwo optional iwm7265fw | iwmfw \ dependency "iwm7265.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7265fw.fwo" iwm7265.fw optional iwm7265fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7265-16.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7265.fw" iwm8000Cfw.c optional iwm8000Cfw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8000C.fw:iwm8000Cfw -miwm8000Cfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm8000Cfw.c" iwm8000Cfw.fwo optional iwm8000Cfw | iwmfw \ dependency "iwm8000C.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm8000Cfw.fwo" iwm8000C.fw optional iwm8000Cfw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-8000C-16.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm8000C.fw" dev/iwn/if_iwn.c optional iwn iwn1000fw.c optional iwn1000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn1000.fw:iwn1000fw -miwn1000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn1000fw.c" iwn1000fw.fwo optional iwn1000fw | iwnfw \ dependency "iwn1000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn1000fw.fwo" iwn1000.fw optional iwn1000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn1000.fw" iwn100fw.c optional iwn100fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn100.fw:iwn100fw -miwn100fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn100fw.c" iwn100fw.fwo optional iwn100fw | iwnfw \ dependency "iwn100.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn100fw.fwo" iwn100.fw optional iwn100fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-100-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn100.fw" iwn105fw.c optional iwn105fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn105.fw:iwn105fw -miwn105fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn105fw.c" iwn105fw.fwo optional iwn105fw | iwnfw \ dependency "iwn105.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn105fw.fwo" iwn105.fw optional iwn105fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-105-6-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn105.fw" iwn135fw.c optional iwn135fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn135.fw:iwn135fw -miwn135fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn135fw.c" iwn135fw.fwo optional iwn135fw | iwnfw \ dependency "iwn135.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn135fw.fwo" iwn135.fw optional iwn135fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-135-6-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn135.fw" iwn2000fw.c optional iwn2000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2000.fw:iwn2000fw -miwn2000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn2000fw.c" iwn2000fw.fwo optional iwn2000fw | iwnfw \ dependency "iwn2000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2000fw.fwo" iwn2000.fw optional iwn2000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-2000-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2000.fw" iwn2030fw.c optional iwn2030fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2030.fw:iwn2030fw -miwn2030fw -c${.TARGET}" \ no-implicit-rule before-depend 
local \ clean "iwn2030fw.c" iwn2030fw.fwo optional iwn2030fw | iwnfw \ dependency "iwn2030.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2030fw.fwo" iwn2030.fw optional iwn2030fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwnwifi-2030-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2030.fw" iwn4965fw.c optional iwn4965fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn4965.fw:iwn4965fw -miwn4965fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn4965fw.c" iwn4965fw.fwo optional iwn4965fw | iwnfw \ dependency "iwn4965.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn4965fw.fwo" iwn4965.fw optional iwn4965fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn4965.fw" iwn5000fw.c optional iwn5000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5000.fw:iwn5000fw -miwn5000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5000fw.c" iwn5000fw.fwo optional iwn5000fw | iwnfw \ dependency "iwn5000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5000fw.fwo" iwn5000.fw optional iwn5000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5000.fw" iwn5150fw.c optional iwn5150fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5150.fw:iwn5150fw -miwn5150fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5150fw.c" iwn5150fw.fwo optional iwn5150fw | iwnfw \ dependency "iwn5150.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5150fw.fwo" iwn5150.fw optional iwn5150fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu"\ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5150.fw" iwn6000fw.c optional iwn6000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000.fw:iwn6000fw -miwn6000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000fw.c" iwn6000fw.fwo optional iwn6000fw | iwnfw \ dependency "iwn6000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000fw.fwo" iwn6000.fw optional iwn6000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000.fw" iwn6000g2afw.c optional iwn6000g2afw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2a.fw:iwn6000g2afw -miwn6000g2afw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2afw.c" iwn6000g2afw.fwo optional iwn6000g2afw | iwnfw \ dependency "iwn6000g2a.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2afw.fwo" iwn6000g2a.fw optional iwn6000g2afw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2a-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000g2a.fw" iwn6000g2bfw.c optional iwn6000g2bfw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2b.fw:iwn6000g2bfw -miwn6000g2bfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2bfw.c" iwn6000g2bfw.fwo optional iwn6000g2bfw | iwnfw \ dependency "iwn6000g2b.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2bfw.fwo" iwn6000g2b.fw optional iwn6000g2bfw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2b-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000g2b.fw" iwn6050fw.c optional iwn6050fw | iwnfw \ 
compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6050.fw:iwn6050fw -miwn6050fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6050fw.c" iwn6050fw.fwo optional iwn6050fw | iwnfw \ dependency "iwn6050.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6050fw.fwo" iwn6050.fw optional iwn6050fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6050.fw" dev/ixgb/if_ixgb.c optional ixgb dev/ixgb/ixgb_ee.c optional ixgb dev/ixgb/ixgb_hw.c optional ixgb dev/ixgbe/if_ix.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/if_ixv.c optional ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_osdep.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_phy.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_api.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_common.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_mbx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_vf.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/jedec_ts/jedec_ts.c optional jedec_ts smbus dev/jme/if_jme.c optional jme pci dev/joy/joy.c optional joy dev/joy/joy_isa.c optional joy isa dev/kbd/kbd.c optional atkbd | pckbd | sc | ukbd | vt dev/kbdmux/kbdmux.c optional kbdmux dev/ksyms/ksyms.c optional ksyms dev/le/am7990.c optional le dev/le/am79900.c optional le dev/le/if_le_pci.c optional le pci dev/le/lance.c optional le dev/led/led.c standard dev/lge/if_lge.c optional lge dev/lmc/if_lmc.c optional lmc dev/malo/if_malo.c optional malo dev/malo/if_malohal.c optional malo dev/malo/if_malo_pci.c optional malo pci dev/mc146818/mc146818.c optional mc146818 dev/mca/mca_bus.c optional mca dev/md/md.c optional md dev/mdio/mdio_if.m optional miiproxy | mdio dev/mdio/mdio.c optional miiproxy | mdio dev/mem/memdev.c optional mem dev/mem/memutil.c optional mem dev/mfi/mfi.c optional mfi dev/mfi/mfi_debug.c optional mfi dev/mfi/mfi_pci.c optional mfi pci dev/mfi/mfi_disk.c optional mfi dev/mfi/mfi_syspd.c optional mfi dev/mfi/mfi_tbolt.c optional mfi dev/mfi/mfi_linux.c optional mfi compat_linux dev/mfi/mfi_cam.c optional mfip scbus dev/mii/acphy.c optional miibus | acphy dev/mii/amphy.c optional miibus | amphy dev/mii/atphy.c optional miibus | atphy dev/mii/axphy.c optional miibus | axphy dev/mii/bmtphy.c optional miibus | bmtphy dev/mii/brgphy.c optional miibus | brgphy dev/mii/ciphy.c optional miibus | ciphy dev/mii/e1000phy.c optional miibus | e1000phy dev/mii/gentbi.c optional miibus | gentbi 
dev/mii/icsphy.c optional miibus | icsphy dev/mii/ip1000phy.c optional miibus | ip1000phy dev/mii/jmphy.c optional miibus | jmphy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/micphy.c optional miibus fdt | micphy fdt dev/mii/mii.c optional miibus | mii dev/mii/mii_bitbang.c optional miibus | mii_bitbang dev/mii/mii_physubr.c optional miibus | mii dev/mii/miibus_if.m optional miibus | mii dev/mii/mlphy.c optional miibus | mlphy dev/mii/nsgphy.c optional miibus | nsgphy dev/mii/nsphy.c optional miibus | nsphy dev/mii/nsphyter.c optional miibus | nsphyter dev/mii/pnaphy.c optional miibus | pnaphy dev/mii/qsphy.c optional miibus | qsphy dev/mii/rdcphy.c optional miibus | rdcphy dev/mii/rgephy.c optional miibus | rgephy dev/mii/rlphy.c optional miibus | rlphy dev/mii/rlswitch.c optional rlswitch dev/mii/smcphy.c optional miibus | smcphy dev/mii/smscphy.c optional miibus | smscphy dev/mii/tdkphy.c optional miibus | tdkphy dev/mii/tlphy.c optional miibus | tlphy dev/mii/truephy.c optional miibus | truephy dev/mii/ukphy.c optional miibus | mii dev/mii/ukphy_subr.c optional miibus | mii dev/mii/xmphy.c optional miibus | xmphy dev/mk48txx/mk48txx.c optional mk48txx dev/mlx/mlx.c optional mlx dev/mlx/mlx_disk.c optional mlx dev/mlx/mlx_pci.c optional mlx pci dev/mly/mly.c optional mly dev/mmc/mmc.c optional mmc dev/mmc/mmcbr_if.m standard dev/mmc/mmcbus_if.m standard dev/mmc/mmcsd.c optional mmcsd dev/mn/if_mn.c optional mn pci dev/mpr/mpr.c optional mpr dev/mpr/mpr_config.c optional mpr # XXX Work around clang warning, until maintainer approves fix. dev/mpr/mpr_mapping.c optional mpr \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mpr/mpr_pci.c optional mpr pci dev/mpr/mpr_sas.c optional mpr \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mpr/mpr_sas_lsi.c optional mpr dev/mpr/mpr_table.c optional mpr dev/mpr/mpr_user.c optional mpr dev/mps/mps.c optional mps dev/mps/mps_config.c optional mps # XXX Work around clang warning, until maintainer approves fix. 
dev/mps/mps_mapping.c optional mps \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mps/mps_pci.c optional mps pci dev/mps/mps_sas.c optional mps \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mps/mps_sas_lsi.c optional mps dev/mps/mps_table.c optional mps dev/mps/mps_user.c optional mps dev/mpt/mpt.c optional mpt dev/mpt/mpt_cam.c optional mpt dev/mpt/mpt_debug.c optional mpt dev/mpt/mpt_pci.c optional mpt pci dev/mpt/mpt_raid.c optional mpt dev/mpt/mpt_user.c optional mpt dev/mrsas/mrsas.c optional mrsas dev/mrsas/mrsas_cam.c optional mrsas dev/mrsas/mrsas_ioctl.c optional mrsas dev/mrsas/mrsas_fp.c optional mrsas dev/msk/if_msk.c optional msk dev/mvs/mvs.c optional mvs dev/mvs/mvs_if.m optional mvs dev/mvs/mvs_pci.c optional mvs pci dev/mwl/if_mwl.c optional mwl dev/mwl/if_mwl_pci.c optional mwl pci dev/mwl/mwlhal.c optional mwl mwlfw.c optional mwlfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk mw88W8363.fw:mw88W8363fw mwlboot.fw:mwlboot -mmwl -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "mwlfw.c" mw88W8363.fwo optional mwlfw \ dependency "mw88W8363.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mw88W8363.fwo" mw88W8363.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mw88W8363.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mw88W8363.fw" mwlboot.fwo optional mwlfw \ dependency "mwlboot.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mwlboot.fwo" mwlboot.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mwlboot.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mwlboot.fw" dev/mxge/if_mxge.c optional mxge pci dev/mxge/mxge_eth_z8e.c optional mxge pci dev/mxge/mxge_ethp_z8e.c optional mxge pci dev/mxge/mxge_rss_eth_z8e.c optional mxge pci dev/mxge/mxge_rss_ethp_z8e.c optional mxge pci dev/my/if_my.c optional my dev/nand/nand.c optional nand dev/nand/nand_bbt.c optional nand dev/nand/nand_cdev.c optional nand dev/nand/nand_generic.c optional nand dev/nand/nand_geom.c optional nand dev/nand/nand_id.c optional nand dev/nand/nandbus.c optional nand dev/nand/nandbus_if.m optional nand dev/nand/nand_if.m optional nand dev/nand/nandsim.c optional nandsim nand dev/nand/nandsim_chip.c optional nandsim nand dev/nand/nandsim_ctrl.c optional nandsim nand dev/nand/nandsim_log.c optional nandsim nand dev/nand/nandsim_swap.c optional nandsim nand dev/nand/nfc_if.m optional nand dev/ncr/ncr.c optional ncr pci dev/ncv/ncr53c500.c optional ncv dev/ncv/ncr53c500_pccard.c optional ncv pccard dev/netmap/if_ptnet.c optional netmap inet dev/netmap/netmap.c optional netmap dev/netmap/netmap_freebsd.c optional netmap dev/netmap/netmap_generic.c optional netmap dev/netmap/netmap_mbq.c optional netmap dev/netmap/netmap_mem2.c optional netmap dev/netmap/netmap_monitor.c optional netmap dev/netmap/netmap_offloadings.c optional netmap dev/netmap/netmap_pipe.c optional netmap dev/netmap/netmap_pt.c optional netmap dev/netmap/netmap_vale.c optional netmap # compile-with "${NORMAL_C} -Wconversion -Wextra" dev/nfsmb/nfsmb.c optional nfsmb pci dev/nge/if_nge.c optional nge dev/nxge/if_nxge.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-device.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-mm.c optional nxge dev/nxge/xgehal/xge-queue.c optional nxge dev/nxge/xgehal/xgehal-driver.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-ring.c optional nxge \ compile-with "${NORMAL_C} 
${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-channel.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-fifo.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-stats.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-config.c optional nxge dev/nxge/xgehal/xgehal-mgmt.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nmdm/nmdm.c optional nmdm dev/nsp/nsp.c optional nsp dev/nsp/nsp_pccard.c optional nsp pccard dev/null/null.c standard dev/oce/oce_hw.c optional oce pci dev/oce/oce_if.c optional oce pci dev/oce/oce_mbox.c optional oce pci dev/oce/oce_queue.c optional oce pci dev/oce/oce_sysctl.c optional oce pci dev/oce/oce_util.c optional oce pci dev/ofw/ofw_bus_if.m optional fdt dev/ofw/ofw_bus_subr.c optional fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_fdt.c optional fdt dev/ofw/ofw_if.m optional fdt dev/ofw/ofw_subr.c optional fdt dev/ofw/ofwbus.c optional fdt dev/ofw/openfirm.c optional fdt dev/ofw/openfirmio.c optional fdt dev/ow/ow.c optional ow \ dependency "owll_if.h" \ dependency "own_if.h" dev/ow/owll_if.m optional ow dev/ow/own_if.m optional ow dev/ow/ow_temp.c optional ow_temp dev/ow/owc_gpiobus.c optional owc gpio dev/patm/if_patm.c optional patm pci dev/patm/if_patm_attach.c optional patm pci dev/patm/if_patm_intr.c optional patm pci dev/patm/if_patm_ioctl.c optional patm pci dev/patm/if_patm_rtables.c optional patm pci dev/patm/if_patm_rx.c optional patm pci dev/patm/if_patm_tx.c optional patm pci dev/pbio/pbio.c optional pbio isa dev/pccard/card_if.m standard dev/pccard/pccard.c optional pccard dev/pccard/pccard_cis.c optional pccard dev/pccard/pccard_cis_quirks.c optional pccard dev/pccard/pccard_device.c optional pccard dev/pccard/power_if.m standard dev/pccbb/pccbb.c optional cbb dev/pccbb/pccbb_isa.c optional cbb isa dev/pccbb/pccbb_pci.c optional cbb pci dev/pcf/pcf.c optional pcf dev/pci/eisa_pci.c optional pci eisa dev/pci/fixup_pci.c optional pci dev/pci/hostb_pci.c optional pci dev/pci/ignore_pci.c optional pci dev/pci/isa_pci.c optional pci isa dev/pci/pci.c optional pci dev/pci/pci_if.m standard dev/pci/pci_iov.c optional pci pci_iov dev/pci/pci_iov_if.m standard dev/pci/pci_iov_schema.c optional pci pci_iov dev/pci/pci_pci.c optional pci dev/pci/pci_subr.c optional pci dev/pci/pci_user.c optional pci dev/pci/pcib_if.m standard dev/pci/pcib_support.c standard dev/pci/vga_pci.c optional pci dev/pcn/if_pcn.c optional pcn pci dev/pdq/if_fea.c optional fea eisa dev/pdq/if_fpa.c optional fpa pci dev/pdq/pdq.c optional nowerror fea eisa | fpa pci dev/pdq/pdq_ifsubr.c optional nowerror fea eisa | fpa pci dev/pms/freebsd/driver/ini/src/agtiapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sadisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saframe.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sahw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sainit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saint.c optional pmspcv \ compile-with "${NORMAL_C} 
-Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampicmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampirsp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saphy.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasata.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sassp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/satimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sautil.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saioctlcmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpidebug.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmsmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmdisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/sminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsatcb.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdesgl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" 
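# A per-entry "compile-with" clause replaces the default build rule for
# that one file; the pms(4) entries above and below use it to append extra
# flags (ending in -w, which suppresses warnings for this vendor-supplied
# code) to the stock ${NORMAL_C} command.  A minimal, purely illustrative
# sketch (path and flag are hypothetical):
#
#	dev/foo/foo_hw.c	optional foo \
#		compile-with "${NORMAL_C} -Wno-unused-parameter"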
dev/pms/RefTisa/tisa/sassata/common/tdint.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdioctl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdhw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tddmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdsmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdtimers.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdio.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdcb.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/ossasat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/ppbus/if_plip.c optional plip dev/ppbus/immio.c optional vpo dev/ppbus/lpbb.c optional lpbb dev/ppbus/lpt.c optional lpt dev/ppbus/pcfclock.c optional pcfclock dev/ppbus/ppb_1284.c optional ppbus dev/ppbus/ppb_base.c optional ppbus dev/ppbus/ppb_msq.c optional ppbus dev/ppbus/ppbconf.c optional ppbus dev/ppbus/ppbus_if.m optional ppbus dev/ppbus/ppi.c optional ppi dev/ppbus/pps.c optional pps dev/ppbus/vpo.c optional vpo dev/ppbus/vpoio.c optional vpo dev/ppc/ppc.c optional ppc dev/ppc/ppc_acpi.c optional ppc acpi dev/ppc/ppc_isa.c optional ppc isa dev/ppc/ppc_pci.c optional ppc pci dev/ppc/ppc_puc.c optional ppc puc dev/proto/proto_bus_isa.c optional proto acpi | proto isa dev/proto/proto_bus_pci.c optional proto pci dev/proto/proto_busdma.c optional proto dev/proto/proto_core.c optional proto dev/pst/pst-iop.c optional pst dev/pst/pst-pci.c optional pst pci dev/pst/pst-raid.c optional pst dev/pty/pty.c optional pty dev/puc/puc.c optional puc dev/puc/puc_cfg.c optional puc dev/puc/puc_pccard.c optional puc pccard dev/puc/puc_pci.c optional puc pci dev/puc/pucdata.c optional puc pci dev/quicc/quicc_core.c optional quicc dev/ral/rt2560.c optional ral dev/ral/rt2661.c optional ral dev/ral/rt2860.c optional ral dev/ral/if_ral_pci.c optional ral pci rt2561fw.c optional rt2561fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561.fw:rt2561fw -mrt2561 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561fw.c" rt2561fw.fwo optional rt2561fw | ralfw \ dependency "rt2561.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2561fw.fwo" rt2561.fw optional rt2561fw | ralfw 
\ dependency "$S/contrib/dev/ral/rt2561.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561.fw" rt2561sfw.c optional rt2561sfw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561s.fw:rt2561sfw -mrt2561s -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561sfw.c" rt2561sfw.fwo optional rt2561sfw | ralfw \ dependency "rt2561s.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2561sfw.fwo" rt2561s.fw optional rt2561sfw | ralfw \ dependency "$S/contrib/dev/ral/rt2561s.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561s.fw" rt2661fw.c optional rt2661fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2661.fw:rt2661fw -mrt2661 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2661fw.c" rt2661fw.fwo optional rt2661fw | ralfw \ dependency "rt2661.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2661fw.fwo" rt2661.fw optional rt2661fw | ralfw \ dependency "$S/contrib/dev/ral/rt2661.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2661.fw" rt2860fw.c optional rt2860fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2860.fw:rt2860fw -mrt2860 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2860fw.c" rt2860fw.fwo optional rt2860fw | ralfw \ dependency "rt2860.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2860fw.fwo" rt2860.fw optional rt2860fw | ralfw \ dependency "$S/contrib/dev/ral/rt2860.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2860.fw" dev/random/random_infra.c optional random dev/random/random_harvestq.c optional random dev/random/randomdev.c optional random random_yarrow | \ random !random_yarrow !random_loadable dev/random/yarrow.c optional random random_yarrow dev/random/fortuna.c optional random !random_yarrow !random_loadable dev/random/hash.c optional random random_yarrow | \ random !random_yarrow !random_loadable dev/rc/rc.c optional rc dev/rccgpio/rccgpio.c optional rccgpio gpio dev/re/if_re.c optional re dev/rl/if_rl.c optional rl pci dev/rndtest/rndtest.c optional rndtest dev/rp/rp.c optional rp dev/rp/rp_isa.c optional rp isa dev/rp/rp_pci.c optional rp pci # dev/rtwn/if_rtwn.c optional rtwn dev/rtwn/if_rtwn_beacon.c optional rtwn dev/rtwn/if_rtwn_calib.c optional rtwn dev/rtwn/if_rtwn_cam.c optional rtwn dev/rtwn/if_rtwn_efuse.c optional rtwn dev/rtwn/if_rtwn_fw.c optional rtwn dev/rtwn/if_rtwn_rx.c optional rtwn dev/rtwn/if_rtwn_task.c optional rtwn dev/rtwn/if_rtwn_tx.c optional rtwn # dev/rtwn/pci/rtwn_pci_attach.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_reg.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_rx.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_tx.c optional rtwn_pci pci # dev/rtwn/usb/rtwn_usb_attach.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_ep.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_reg.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_rx.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_tx.c optional rtwn_usb # RTL8188E dev/rtwn/rtl8188e/r88e_beacon.c optional rtwn dev/rtwn/rtl8188e/r88e_calib.c optional rtwn dev/rtwn/rtl8188e/r88e_chan.c optional rtwn dev/rtwn/rtl8188e/r88e_fw.c optional rtwn dev/rtwn/rtl8188e/r88e_init.c optional rtwn dev/rtwn/rtl8188e/r88e_led.c optional rtwn dev/rtwn/rtl8188e/r88e_tx.c optional rtwn dev/rtwn/rtl8188e/r88e_rf.c optional rtwn dev/rtwn/rtl8188e/r88e_rom.c optional rtwn dev/rtwn/rtl8188e/r88e_rx.c optional rtwn dev/rtwn/rtl8188e/usb/r88eu_attach.c optional rtwn_usb dev/rtwn/rtl8188e/usb/r88eu_init.c optional rtwn_usb 
dev/rtwn/rtl8188e/usb/r88eu_rx.c optional rtwn_usb # RTL8192C dev/rtwn/rtl8192c/r92c_attach.c optional rtwn dev/rtwn/rtl8192c/r92c_beacon.c optional rtwn dev/rtwn/rtl8192c/r92c_calib.c optional rtwn dev/rtwn/rtl8192c/r92c_chan.c optional rtwn dev/rtwn/rtl8192c/r92c_fw.c optional rtwn dev/rtwn/rtl8192c/r92c_init.c optional rtwn +dev/rtwn/rtl8192c/r92c_llt.c optional rtwn dev/rtwn/rtl8192c/r92c_rf.c optional rtwn dev/rtwn/rtl8192c/r92c_rom.c optional rtwn dev/rtwn/rtl8192c/r92c_rx.c optional rtwn dev/rtwn/rtl8192c/r92c_tx.c optional rtwn dev/rtwn/rtl8192c/pci/r92ce_attach.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_calib.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_fw.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_init.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_led.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_rx.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_tx.c optional rtwn_pci pci dev/rtwn/rtl8192c/usb/r92cu_attach.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_init.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_led.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_rx.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_tx.c optional rtwn_usb +# RTL8192E +dev/rtwn/rtl8192e/r92e_chan.c optional rtwn +dev/rtwn/rtl8192e/r92e_fw.c optional rtwn +dev/rtwn/rtl8192e/r92e_init.c optional rtwn +dev/rtwn/rtl8192e/r92e_led.c optional rtwn +dev/rtwn/rtl8192e/r92e_rf.c optional rtwn +dev/rtwn/rtl8192e/r92e_rom.c optional rtwn +dev/rtwn/rtl8192e/r92e_rx.c optional rtwn +dev/rtwn/rtl8192e/usb/r92eu_attach.c optional rtwn_usb +dev/rtwn/rtl8192e/usb/r92eu_init.c optional rtwn_usb # RTL8812A dev/rtwn/rtl8812a/r12a_beacon.c optional rtwn dev/rtwn/rtl8812a/r12a_calib.c optional rtwn dev/rtwn/rtl8812a/r12a_caps.c optional rtwn dev/rtwn/rtl8812a/r12a_chan.c optional rtwn dev/rtwn/rtl8812a/r12a_fw.c optional rtwn dev/rtwn/rtl8812a/r12a_init.c optional rtwn dev/rtwn/rtl8812a/r12a_led.c optional rtwn dev/rtwn/rtl8812a/r12a_rf.c optional rtwn dev/rtwn/rtl8812a/r12a_rom.c optional rtwn dev/rtwn/rtl8812a/r12a_rx.c optional rtwn dev/rtwn/rtl8812a/r12a_tx.c optional rtwn dev/rtwn/rtl8812a/usb/r12au_attach.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_init.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_rx.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_tx.c optional rtwn_usb # RTL8821A dev/rtwn/rtl8821a/r21a_beacon.c optional rtwn dev/rtwn/rtl8821a/r21a_calib.c optional rtwn dev/rtwn/rtl8821a/r21a_chan.c optional rtwn dev/rtwn/rtl8821a/r21a_fw.c optional rtwn dev/rtwn/rtl8821a/r21a_init.c optional rtwn dev/rtwn/rtl8821a/r21a_led.c optional rtwn dev/rtwn/rtl8821a/r21a_rom.c optional rtwn dev/rtwn/rtl8821a/r21a_rx.c optional rtwn dev/rtwn/rtl8821a/usb/r21au_attach.c optional rtwn_usb dev/rtwn/rtl8821a/usb/r21au_dfs.c optional rtwn_usb dev/rtwn/rtl8821a/usb/r21au_init.c optional rtwn_usb rtwn-rtl8188eufw.c optional rtwn-rtl8188eufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eufw.fw:rtwn-rtl8188eufw:111 -mrtwn-rtl8188eufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8188eufw.c" rtwn-rtl8188eufw.fwo optional rtwn-rtl8188eufw | rtwnfw \ dependency "rtwn-rtl8188eufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8188eufw.fwo" rtwn-rtl8188eufw.fw optional rtwn-rtl8188eufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8188eufw.fw" rtwn-rtl8192cfwE.c optional rtwn-rtl8192cfwE | rtwnfw \ compile-with "${AWK} -f 
$S/tools/fw_stub.awk rtwn-rtl8192cfwE.fw:rtwn-rtl8192cfwE:111 -mrtwn-rtl8192cfwE -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE.c" rtwn-rtl8192cfwE.fwo optional rtwn-rtl8192cfwE | rtwnfw \ dependency "rtwn-rtl8192cfwE.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwE.fwo" rtwn-rtl8192cfwE.fw optional rtwn-rtl8192cfwE | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE.fw" rtwn-rtl8192cfwE_B.c optional rtwn-rtl8192cfwE_B | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE_B.fw:rtwn-rtl8192cfwE_B:111 -mrtwn-rtl8192cfwE_B -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE_B.c" rtwn-rtl8192cfwE_B.fwo optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "rtwn-rtl8192cfwE_B.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fwo" rtwn-rtl8192cfwE_B.fw optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE_B.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fw" rtwn-rtl8192cfwT.c optional rtwn-rtl8192cfwT | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwT.fw:rtwn-rtl8192cfwT:111 -mrtwn-rtl8192cfwT -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwT.c" rtwn-rtl8192cfwT.fwo optional rtwn-rtl8192cfwT | rtwnfw \ dependency "rtwn-rtl8192cfwT.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwT.fwo" rtwn-rtl8192cfwT.fw optional rtwn-rtl8192cfwT | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwT.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwT.fw" rtwn-rtl8192cfwU.c optional rtwn-rtl8192cfwU | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwU.fw:rtwn-rtl8192cfwU:111 -mrtwn-rtl8192cfwU -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwU.c" rtwn-rtl8192cfwU.fwo optional rtwn-rtl8192cfwU | rtwnfw \ dependency "rtwn-rtl8192cfwU.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwU.fwo" rtwn-rtl8192cfwU.fw optional rtwn-rtl8192cfwU | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwU.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwU.fw" +rtwn-rtl8192eufw.c optional rtwn-rtl8192eufw | rtwnfw \ + compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192eufw.fw:rtwn-rtl8192eufw:111 -mrtwn-rtl8192eufw -c${.TARGET}" \ + no-implicit-rule before-depend local \ + clean "rtwn-rtl8192eufw.c" +rtwn-rtl8192eufw.fwo optional rtwn-rtl8192eufw | rtwnfw \ + dependency "rtwn-rtl8192eufw.fw" \ + compile-with "${NORMAL_FWO}" \ + no-implicit-rule \ + clean "rtwn-rtl8192eufw.fwo" +rtwn-rtl8192eufw.fw optional rtwn-rtl8192eufw | rtwnfw \ + dependency "$S/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu" \ + compile-with "${NORMAL_FW}" \ + no-obj no-implicit-rule \ + clean "rtwn-rtl8192eufw.fw" rtwn-rtl8812aufw.c optional rtwn-rtl8812aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8812aufw.fw:rtwn-rtl8812aufw:111 -mrtwn-rtl8812aufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8812aufw.c" rtwn-rtl8812aufw.fwo optional rtwn-rtl8812aufw | rtwnfw \ dependency "rtwn-rtl8812aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8812aufw.fwo" rtwn-rtl8812aufw.fw optional rtwn-rtl8812aufw | rtwnfw \ dependency 
"$S/contrib/dev/rtwn/rtwn-rtl8812aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8812aufw.fw" rtwn-rtl8821aufw.c optional rtwn-rtl8821aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8821aufw.fw:rtwn-rtl8821aufw:111 -mrtwn-rtl8821aufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8821aufw.c" rtwn-rtl8821aufw.fwo optional rtwn-rtl8821aufw | rtwnfw \ dependency "rtwn-rtl8821aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8821aufw.fwo" rtwn-rtl8821aufw.fw optional rtwn-rtl8821aufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8821aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8821aufw.fw" dev/safe/safe.c optional safe dev/scc/scc_if.m optional scc dev/scc/scc_bfe_ebus.c optional scc ebus dev/scc/scc_bfe_quicc.c optional scc quicc dev/scc/scc_bfe_sbus.c optional scc fhc | scc sbus dev/scc/scc_core.c optional scc dev/scc/scc_dev_quicc.c optional scc quicc dev/scc/scc_dev_sab82532.c optional scc dev/scc/scc_dev_z8530.c optional scc dev/sdhci/sdhci.c optional sdhci dev/sdhci/sdhci_fdt_gpio.c optional sdhci fdt gpio dev/sdhci/sdhci_if.m optional sdhci dev/sdhci/sdhci_acpi.c optional sdhci acpi dev/sdhci/sdhci_pci.c optional sdhci pci dev/sf/if_sf.c optional sf pci dev/sge/if_sge.c optional sge pci dev/siba/siba_bwn.c optional siba_bwn pci dev/siba/siba_core.c optional siba_bwn pci dev/siis/siis.c optional siis pci dev/sis/if_sis.c optional sis pci dev/sk/if_sk.c optional sk pci dev/smbus/smb.c optional smb dev/smbus/smbconf.c optional smbus dev/smbus/smbus.c optional smbus dev/smbus/smbus_if.m optional smbus dev/smc/if_smc.c optional smc dev/smc/if_smc_fdt.c optional smc fdt dev/sn/if_sn.c optional sn dev/sn/if_sn_isa.c optional sn isa dev/sn/if_sn_pccard.c optional sn pccard dev/snp/snp.c optional snp dev/sound/clone.c optional sound dev/sound/unit.c optional sound dev/sound/isa/ad1816.c optional snd_ad1816 isa dev/sound/isa/ess.c optional snd_ess isa dev/sound/isa/gusc.c optional snd_gusc isa dev/sound/isa/mss.c optional snd_mss isa dev/sound/isa/sb16.c optional snd_sb16 isa dev/sound/isa/sb8.c optional snd_sb8 isa dev/sound/isa/sbc.c optional snd_sbc isa dev/sound/isa/sndbuf_dma.c optional sound isa dev/sound/pci/als4000.c optional snd_als4000 pci dev/sound/pci/atiixp.c optional snd_atiixp pci dev/sound/pci/cmi.c optional snd_cmi pci dev/sound/pci/cs4281.c optional snd_cs4281 pci dev/sound/pci/csa.c optional snd_csa pci dev/sound/pci/csapcm.c optional snd_csa pci dev/sound/pci/ds1.c optional snd_ds1 pci dev/sound/pci/emu10k1.c optional snd_emu10k1 pci dev/sound/pci/emu10kx.c optional snd_emu10kx pci dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci dev/sound/pci/envy24.c optional snd_envy24 pci dev/sound/pci/envy24ht.c optional snd_envy24ht pci dev/sound/pci/es137x.c optional snd_es137x pci dev/sound/pci/fm801.c optional snd_fm801 pci dev/sound/pci/ich.c optional snd_ich pci dev/sound/pci/maestro.c optional snd_maestro pci dev/sound/pci/maestro3.c optional snd_maestro3 pci dev/sound/pci/neomagic.c optional snd_neomagic pci dev/sound/pci/solo.c optional snd_solo pci dev/sound/pci/spicds.c optional snd_spicds pci dev/sound/pci/t4dwave.c optional snd_t4dwave pci dev/sound/pci/via8233.c optional snd_via8233 pci dev/sound/pci/via82c686.c optional snd_via82c686 pci dev/sound/pci/vibes.c optional snd_vibes pci dev/sound/pci/hda/hdaa.c optional snd_hda pci dev/sound/pci/hda/hdaa_patches.c optional 
snd_hda pci dev/sound/pci/hda/hdac.c optional snd_hda pci dev/sound/pci/hda/hdac_if.m optional snd_hda pci dev/sound/pci/hda/hdacc.c optional snd_hda pci dev/sound/pci/hdspe.c optional snd_hdspe pci dev/sound/pci/hdspe-pcm.c optional snd_hdspe pci dev/sound/pcm/ac97.c optional sound dev/sound/pcm/ac97_if.m optional sound dev/sound/pcm/ac97_patch.c optional sound dev/sound/pcm/buffer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/channel.c optional sound dev/sound/pcm/channel_if.m optional sound dev/sound/pcm/dsp.c optional sound dev/sound/pcm/feeder.c optional sound dev/sound/pcm/feeder_chain.c optional sound dev/sound/pcm/feeder_eq.c optional sound \ dependency "feeder_eq_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_if.m optional sound dev/sound/pcm/feeder_format.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_matrix.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_mixer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_rate.c optional sound \ dependency "feeder_rate_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_volume.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/mixer.c optional sound dev/sound/pcm/mixer_if.m optional sound dev/sound/pcm/sndstat.c optional sound dev/sound/pcm/sound.c optional sound dev/sound/pcm/vchan.c optional sound dev/sound/usb/uaudio.c optional snd_uaudio usb dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb dev/sound/midi/midi.c optional sound dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/midi/sequencer.c optional sound dev/sound/midi/synth_if.m optional sound dev/spibus/ofw_spibus.c optional fdt spibus dev/spibus/spibus.c optional spibus \ dependency "spibus_if.h" dev/spibus/spigen.c optional spigen dev/spibus/spibus_if.m optional spibus dev/ste/if_ste.c optional ste pci dev/stg/tmc18c30.c optional stg dev/stg/tmc18c30_isa.c optional stg isa dev/stg/tmc18c30_pccard.c optional stg pccard dev/stg/tmc18c30_pci.c optional stg pci dev/stg/tmc18c30_subr.c optional stg dev/stge/if_stge.c optional stge dev/streams/streams.c optional streams dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" dev/syscons/blank/blank_saver.c optional blank_saver dev/syscons/daemon/daemon_saver.c optional daemon_saver dev/syscons/dragon/dragon_saver.c optional dragon_saver dev/syscons/fade/fade_saver.c optional fade_saver dev/syscons/fire/fire_saver.c optional fire_saver dev/syscons/green/green_saver.c optional green_saver dev/syscons/logo/logo.c optional logo_saver dev/syscons/logo/logo_saver.c optional logo_saver dev/syscons/rain/rain_saver.c optional rain_saver dev/syscons/schistory.c optional sc dev/syscons/scmouse.c optional sc dev/syscons/scterm.c optional sc dev/syscons/scvidctl.c optional sc dev/syscons/snake/snake_saver.c optional snake_saver dev/syscons/star/star_saver.c optional star_saver dev/syscons/syscons.c optional sc dev/syscons/sysmouse.c optional sc dev/syscons/warp/warp_saver.c optional warp_saver dev/tdfx/tdfx_linux.c optional tdfx_linux tdfx compat_linux dev/tdfx/tdfx_pci.c optional tdfx pci dev/ti/if_ti.c optional ti pci dev/tl/if_tl.c optional tl pci dev/trm/trm.c optional trm dev/twa/tw_cl_init.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_intr.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_io.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_misc.c optional twa \ 
compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_cam.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_freebsd.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twe/twe.c optional twe dev/twe/twe_freebsd.c optional twe dev/tws/tws.c optional tws dev/tws/tws_cam.c optional tws dev/tws/tws_hdm.c optional tws dev/tws/tws_services.c optional tws dev/tws/tws_user.c optional tws dev/tx/if_tx.c optional tx dev/txp/if_txp.c optional txp dev/uart/uart_bus_acpi.c optional uart acpi dev/uart/uart_bus_ebus.c optional uart ebus dev/uart/uart_bus_fdt.c optional uart fdt dev/uart/uart_bus_isa.c optional uart isa dev/uart/uart_bus_pccard.c optional uart pccard dev/uart/uart_bus_pci.c optional uart pci dev/uart/uart_bus_puc.c optional uart puc dev/uart/uart_bus_scc.c optional uart scc dev/uart/uart_core.c optional uart dev/uart/uart_dbg.c optional uart gdb dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 | uart uart_snps dev/uart/uart_dev_pl011.c optional uart pl011 dev/uart/uart_dev_quicc.c optional uart quicc dev/uart/uart_dev_sab82532.c optional uart uart_sab82532 dev/uart/uart_dev_sab82532.c optional uart scc dev/uart/uart_dev_snps.c optional uart uart_snps fdt dev/uart/uart_dev_z8530.c optional uart uart_z8530 dev/uart/uart_dev_z8530.c optional uart scc dev/uart/uart_if.m optional uart dev/uart/uart_subr.c optional uart dev/uart/uart_tty.c optional uart dev/ubsec/ubsec.c optional ubsec # # USB controller drivers # dev/usb/controller/at91dci.c optional at91dci dev/usb/controller/at91dci_atmelarm.c optional at91dci at91rm9200 dev/usb/controller/musb_otg.c optional musb dev/usb/controller/musb_otg_atmelarm.c optional musb at91rm9200 dev/usb/controller/dwc_otg.c optional dwcotg dev/usb/controller/dwc_otg_fdt.c optional dwcotg fdt dev/usb/controller/ehci.c optional ehci dev/usb/controller/ehci_pci.c optional ehci pci dev/usb/controller/ohci.c optional ohci dev/usb/controller/ohci_pci.c optional ohci pci dev/usb/controller/uhci.c optional uhci dev/usb/controller/uhci_pci.c optional uhci pci dev/usb/controller/xhci.c optional xhci dev/usb/controller/xhci_pci.c optional xhci pci dev/usb/controller/saf1761_otg.c optional saf1761otg dev/usb/controller/saf1761_otg_fdt.c optional saf1761otg fdt dev/usb/controller/uss820dci.c optional uss820dci dev/usb/controller/uss820dci_atmelarm.c optional uss820dci at91rm9200 dev/usb/controller/usb_controller.c optional usb # # USB storage drivers # dev/usb/storage/umass.c optional umass dev/usb/storage/urio.c optional urio dev/usb/storage/ustorage_fs.c optional usfs # # USB core # dev/usb/usb_busdma.c optional usb dev/usb/usb_core.c optional usb dev/usb/usb_debug.c optional usb dev/usb/usb_dev.c optional usb dev/usb/usb_device.c optional usb dev/usb/usb_dynamic.c optional usb dev/usb/usb_error.c optional usb dev/usb/usb_generic.c optional usb dev/usb/usb_handle_request.c optional usb dev/usb/usb_hid.c optional usb dev/usb/usb_hub.c optional usb dev/usb/usb_if.m optional usb dev/usb/usb_lookup.c optional usb dev/usb/usb_mbuf.c optional usb dev/usb/usb_msctest.c optional usb dev/usb/usb_parse.c optional usb dev/usb/usb_pf.c optional usb dev/usb/usb_process.c optional usb dev/usb/usb_request.c optional usb dev/usb/usb_transfer.c optional usb dev/usb/usb_util.c optional usb # # USB network drivers # dev/usb/net/if_aue.c optional aue dev/usb/net/if_axe.c optional axe dev/usb/net/if_axge.c optional axge dev/usb/net/if_cdce.c optional cdce dev/usb/net/if_cue.c optional cue dev/usb/net/if_ipheth.c optional ipheth dev/usb/net/if_kue.c 
optional kue dev/usb/net/if_mos.c optional mos dev/usb/net/if_rue.c optional rue dev/usb/net/if_smsc.c optional smsc dev/usb/net/if_udav.c optional udav dev/usb/net/if_ure.c optional ure dev/usb/net/if_usie.c optional usie dev/usb/net/if_urndis.c optional urndis dev/usb/net/ruephy.c optional rue dev/usb/net/usb_ethernet.c optional uether | aue | axe | axge | cdce | \ cue | ipheth | kue | mos | rue | \ smsc | udav | ure | urndis dev/usb/net/uhso.c optional uhso # # USB WLAN drivers # dev/usb/wlan/if_rsu.c optional rsu rsu-rtl8712fw.c optional rsu-rtl8712fw | rsufw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rsu-rtl8712fw.fw:rsu-rtl8712fw:120 -mrsu-rtl8712fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rsu-rtl8712fw.c" rsu-rtl8712fw.fwo optional rsu-rtl8712fw | rsufw \ dependency "rsu-rtl8712fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rsu-rtl8712fw.fwo" rsu-rtl8712fw.fw optional rsu-rtl8712fw | rsufw \ dependency "$S/contrib/dev/rsu/rsu-rtl8712fw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rsu-rtl8712fw.fw" dev/usb/wlan/if_rum.c optional rum dev/usb/wlan/if_run.c optional run runfw.c optional runfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk run.fw:runfw -mrunfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "runfw.c" runfw.fwo optional runfw \ dependency "run.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "runfw.fwo" run.fw optional runfw \ dependency "$S/contrib/dev/run/rt2870.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "run.fw" dev/usb/wlan/if_uath.c optional uath dev/usb/wlan/if_upgt.c optional upgt dev/usb/wlan/if_ural.c optional ural dev/usb/wlan/if_urtw.c optional urtw dev/usb/wlan/if_zyd.c optional zyd # # USB serial and parallel port drivers # dev/usb/serial/u3g.c optional u3g dev/usb/serial/uark.c optional uark dev/usb/serial/ubsa.c optional ubsa dev/usb/serial/ubser.c optional ubser dev/usb/serial/uchcom.c optional uchcom dev/usb/serial/ucycom.c optional ucycom dev/usb/serial/ufoma.c optional ufoma dev/usb/serial/uftdi.c optional uftdi dev/usb/serial/ugensa.c optional ugensa dev/usb/serial/uipaq.c optional uipaq dev/usb/serial/ulpt.c optional ulpt dev/usb/serial/umcs.c optional umcs dev/usb/serial/umct.c optional umct dev/usb/serial/umodem.c optional umodem dev/usb/serial/umoscom.c optional umoscom dev/usb/serial/uplcom.c optional uplcom dev/usb/serial/uslcom.c optional uslcom dev/usb/serial/uvisor.c optional uvisor dev/usb/serial/uvscom.c optional uvscom dev/usb/serial/usb_serial.c optional ucom | u3g | uark | ubsa | ubser | \ uchcom | ucycom | ufoma | uftdi | \ ugensa | uipaq | umcs | umct | \ umodem | umoscom | uplcom | usie | \ uslcom | uvisor | uvscom # # USB misc drivers # dev/usb/misc/ufm.c optional ufm dev/usb/misc/udbp.c optional udbp dev/usb/misc/ugold.c optional ugold dev/usb/misc/uled.c optional uled # # USB input drivers # dev/usb/input/atp.c optional atp dev/usb/input/uep.c optional uep dev/usb/input/uhid.c optional uhid dev/usb/input/ukbd.c optional ukbd dev/usb/input/ums.c optional ums dev/usb/input/wsp.c optional wsp # # USB quirks # dev/usb/quirk/usb_quirk.c optional usb # # USB templates # dev/usb/template/usb_template.c optional usb_template dev/usb/template/usb_template_audio.c optional usb_template dev/usb/template/usb_template_cdce.c optional usb_template dev/usb/template/usb_template_kbd.c optional usb_template dev/usb/template/usb_template_modem.c optional usb_template dev/usb/template/usb_template_mouse.c optional usb_template
dev/usb/template/usb_template_msc.c optional usb_template dev/usb/template/usb_template_mtp.c optional usb_template dev/usb/template/usb_template_phone.c optional usb_template dev/usb/template/usb_template_serialnet.c optional usb_template dev/usb/template/usb_template_midi.c optional usb_template # # USB video drivers # dev/usb/video/udl.c optional udl # # USB END # dev/videomode/videomode.c optional videomode dev/videomode/edid.c optional videomode dev/videomode/pickmode.c optional videomode dev/videomode/vesagtf.c optional videomode dev/utopia/idtphy.c optional utopia dev/utopia/suni.c optional utopia dev/utopia/utopia.c optional utopia dev/vge/if_vge.c optional vge dev/viapm/viapm.c optional viapm pci dev/virtio/virtio.c optional virtio dev/virtio/virtqueue.c optional virtio dev/virtio/virtio_bus_if.m optional virtio dev/virtio/virtio_if.m optional virtio dev/virtio/pci/virtio_pci.c optional virtio_pci dev/virtio/mmio/virtio_mmio.c optional virtio_mmio fdt dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio fdt dev/virtio/network/if_vtnet.c optional vtnet dev/virtio/block/virtio_blk.c optional virtio_blk dev/virtio/balloon/virtio_balloon.c optional virtio_balloon dev/virtio/scsi/virtio_scsi.c optional virtio_scsi dev/virtio/random/virtio_random.c optional virtio_random dev/virtio/console/virtio_console.c optional virtio_console dev/vkbd/vkbd.c optional vkbd dev/vr/if_vr.c optional vr pci dev/vt/colors/vt_termcolors.c optional vt dev/vt/font/vt_font_default.c optional vt dev/vt/font/vt_mouse_cursor.c optional vt dev/vt/hw/efifb/efifb.c optional vt_efifb dev/vt/hw/fb/vt_fb.c optional vt dev/vt/hw/vga/vt_vga.c optional vt vt_vga dev/vt/logo/logo_freebsd.c optional vt splash dev/vt/logo/logo_beastie.c optional vt splash dev/vt/vt_buf.c optional vt dev/vt/vt_consolectl.c optional vt dev/vt/vt_core.c optional vt dev/vt/vt_cpulogos.c optional vt splash dev/vt/vt_font.c optional vt dev/vt/vt_sysmouse.c optional vt dev/vte/if_vte.c optional vte pci dev/vx/if_vx.c optional vx dev/vx/if_vx_eisa.c optional vx eisa dev/vx/if_vx_pci.c optional vx pci dev/vxge/vxge.c optional vxge dev/vxge/vxgehal/vxgehal-ifmsg.c optional vxge dev/vxge/vxgehal/vxgehal-mrpcim.c optional vxge dev/vxge/vxgehal/vxge-queue.c optional vxge dev/vxge/vxgehal/vxgehal-ring.c optional vxge dev/vxge/vxgehal/vxgehal-swapper.c optional vxge dev/vxge/vxgehal/vxgehal-mgmt.c optional vxge dev/vxge/vxgehal/vxgehal-srpcim.c optional vxge dev/vxge/vxgehal/vxgehal-config.c optional vxge dev/vxge/vxgehal/vxgehal-blockpool.c optional vxge dev/vxge/vxgehal/vxgehal-doorbells.c optional vxge dev/vxge/vxgehal/vxgehal-mgmtaux.c optional vxge dev/vxge/vxgehal/vxgehal-device.c optional vxge dev/vxge/vxgehal/vxgehal-mm.c optional vxge dev/vxge/vxgehal/vxgehal-driver.c optional vxge dev/vxge/vxgehal/vxgehal-virtualpath.c optional vxge dev/vxge/vxgehal/vxgehal-channel.c optional vxge dev/vxge/vxgehal/vxgehal-fifo.c optional vxge dev/watchdog/watchdog.c standard dev/wb/if_wb.c optional wb pci dev/wi/if_wi.c optional wi dev/wi/if_wi_pccard.c optional wi pccard dev/wi/if_wi_pci.c optional wi pci dev/wpi/if_wpi.c optional wpi pci wpifw.c optional wpifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk wpi.fw:wpifw:153229 -mwpi -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "wpifw.c" wpifw.fwo optional wpifw \ dependency "wpi.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "wpifw.fwo" wpi.fw optional wpifw \ dependency "$S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ 
clean "wpi.fw" dev/xdma/xdma.c optional xdma dev/xdma/xdma_if.m optional xdma dev/xdma/xdma_fdt_test.c optional xdma xdma_test fdt dev/xe/if_xe.c optional xe dev/xe/if_xe_pccard.c optional xe pccard dev/xen/balloon/balloon.c optional xenhvm dev/xen/blkfront/blkfront.c optional xenhvm dev/xen/blkback/blkback.c optional xenhvm dev/xen/console/xen_console.c optional xenhvm dev/xen/control/control.c optional xenhvm dev/xen/grant_table/grant_table.c optional xenhvm dev/xen/netback/netback.c optional xenhvm dev/xen/netfront/netfront.c optional xenhvm dev/xen/xenpci/xenpci.c optional xenpci dev/xen/timer/timer.c optional xenhvm dev/xen/pvcpu/pvcpu.c optional xenhvm dev/xen/xenstore/xenstore.c optional xenhvm dev/xen/xenstore/xenstore_dev.c optional xenhvm dev/xen/xenstore/xenstored_dev.c optional xenhvm dev/xen/evtchn/evtchn_dev.c optional xenhvm dev/xen/privcmd/privcmd.c optional xenhvm dev/xen/gntdev/gntdev.c optional xenhvm dev/xen/debug/debug.c optional xenhvm dev/xl/if_xl.c optional xl pci dev/xl/xlphy.c optional xl pci fs/autofs/autofs.c optional autofs fs/autofs/autofs_vfsops.c optional autofs fs/autofs/autofs_vnops.c optional autofs fs/deadfs/dead_vnops.c standard fs/devfs/devfs_devs.c standard fs/devfs/devfs_dir.c standard fs/devfs/devfs_rule.c standard fs/devfs/devfs_vfsops.c standard fs/devfs/devfs_vnops.c standard fs/fdescfs/fdesc_vfsops.c optional fdescfs fs/fdescfs/fdesc_vnops.c optional fdescfs fs/fifofs/fifo_vnops.c standard fs/cuse/cuse.c optional cuse fs/fuse/fuse_device.c optional fuse fs/fuse/fuse_file.c optional fuse fs/fuse/fuse_internal.c optional fuse fs/fuse/fuse_io.c optional fuse fs/fuse/fuse_ipc.c optional fuse fs/fuse/fuse_main.c optional fuse fs/fuse/fuse_node.c optional fuse fs/fuse/fuse_vfsops.c optional fuse fs/fuse/fuse_vnops.c optional fuse fs/msdosfs/msdosfs_conv.c optional msdosfs fs/msdosfs/msdosfs_denode.c optional msdosfs fs/msdosfs/msdosfs_fat.c optional msdosfs fs/msdosfs/msdosfs_fileno.c optional msdosfs fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv fs/msdosfs/msdosfs_lookup.c optional msdosfs fs/msdosfs/msdosfs_vfsops.c optional msdosfs fs/msdosfs/msdosfs_vnops.c optional msdosfs fs/nandfs/bmap.c optional nandfs fs/nandfs/nandfs_alloc.c optional nandfs fs/nandfs/nandfs_bmap.c optional nandfs fs/nandfs/nandfs_buffer.c optional nandfs fs/nandfs/nandfs_cleaner.c optional nandfs fs/nandfs/nandfs_cpfile.c optional nandfs fs/nandfs/nandfs_dat.c optional nandfs fs/nandfs/nandfs_dir.c optional nandfs fs/nandfs/nandfs_ifile.c optional nandfs fs/nandfs/nandfs_segment.c optional nandfs fs/nandfs/nandfs_subr.c optional nandfs fs/nandfs/nandfs_sufile.c optional nandfs fs/nandfs/nandfs_vfsops.c optional nandfs fs/nandfs/nandfs_vnops.c optional nandfs fs/nfs/nfs_commonkrpc.c optional nfscl | nfsd fs/nfs/nfs_commonsubs.c optional nfscl | nfsd fs/nfs/nfs_commonport.c optional nfscl | nfsd fs/nfs/nfs_commonacl.c optional nfscl | nfsd fs/nfsclient/nfs_clcomsubs.c optional nfscl fs/nfsclient/nfs_clsubs.c optional nfscl fs/nfsclient/nfs_clstate.c optional nfscl fs/nfsclient/nfs_clkrpc.c optional nfscl fs/nfsclient/nfs_clrpcops.c optional nfscl fs/nfsclient/nfs_clvnops.c optional nfscl fs/nfsclient/nfs_clnode.c optional nfscl fs/nfsclient/nfs_clvfsops.c optional nfscl fs/nfsclient/nfs_clport.c optional nfscl fs/nfsclient/nfs_clbio.c optional nfscl fs/nfsclient/nfs_clnfsiod.c optional nfscl fs/nfsserver/nfs_fha_new.c optional nfsd inet fs/nfsserver/nfs_nfsdsocket.c optional nfsd inet fs/nfsserver/nfs_nfsdsubs.c optional nfsd inet fs/nfsserver/nfs_nfsdstate.c optional nfsd 
inet fs/nfsserver/nfs_nfsdkrpc.c optional nfsd inet fs/nfsserver/nfs_nfsdserv.c optional nfsd inet fs/nfsserver/nfs_nfsdport.c optional nfsd inet fs/nfsserver/nfs_nfsdcache.c optional nfsd inet fs/nullfs/null_subr.c optional nullfs fs/nullfs/null_vfsops.c optional nullfs fs/nullfs/null_vnops.c optional nullfs fs/procfs/procfs.c optional procfs fs/procfs/procfs_ctl.c optional procfs fs/procfs/procfs_dbregs.c optional procfs fs/procfs/procfs_fpregs.c optional procfs fs/procfs/procfs_ioctl.c optional procfs fs/procfs/procfs_map.c optional procfs fs/procfs/procfs_mem.c optional procfs fs/procfs/procfs_note.c optional procfs fs/procfs/procfs_osrel.c optional procfs fs/procfs/procfs_regs.c optional procfs fs/procfs/procfs_rlimit.c optional procfs fs/procfs/procfs_status.c optional procfs fs/procfs/procfs_type.c optional procfs fs/pseudofs/pseudofs.c optional pseudofs fs/pseudofs/pseudofs_fileno.c optional pseudofs fs/pseudofs/pseudofs_vncache.c optional pseudofs fs/pseudofs/pseudofs_vnops.c optional pseudofs fs/smbfs/smbfs_io.c optional smbfs fs/smbfs/smbfs_node.c optional smbfs fs/smbfs/smbfs_smb.c optional smbfs fs/smbfs/smbfs_subr.c optional smbfs fs/smbfs/smbfs_vfsops.c optional smbfs fs/smbfs/smbfs_vnops.c optional smbfs fs/udf/osta.c optional udf fs/udf/udf_iconv.c optional udf_iconv fs/udf/udf_vfsops.c optional udf fs/udf/udf_vnops.c optional udf fs/unionfs/union_subr.c optional unionfs fs/unionfs/union_vfsops.c optional unionfs fs/unionfs/union_vnops.c optional unionfs fs/tmpfs/tmpfs_vnops.c optional tmpfs fs/tmpfs/tmpfs_fifoops.c optional tmpfs fs/tmpfs/tmpfs_vfsops.c optional tmpfs fs/tmpfs/tmpfs_subr.c optional tmpfs gdb/gdb_cons.c optional gdb gdb/gdb_main.c optional gdb gdb/gdb_packet.c optional gdb geom/bde/g_bde.c optional geom_bde geom/bde/g_bde_crypt.c optional geom_bde geom/bde/g_bde_lock.c optional geom_bde geom/bde/g_bde_work.c optional geom_bde geom/cache/g_cache.c optional geom_cache geom/concat/g_concat.c optional geom_concat geom/eli/g_eli.c optional geom_eli geom/eli/g_eli_crypto.c optional geom_eli geom/eli/g_eli_ctl.c optional geom_eli geom/eli/g_eli_hmac.c optional geom_eli geom/eli/g_eli_integrity.c optional geom_eli geom/eli/g_eli_key.c optional geom_eli geom/eli/g_eli_key_cache.c optional geom_eli geom/eli/g_eli_privacy.c optional geom_eli geom/eli/pkcs5v2.c optional geom_eli geom/gate/g_gate.c optional geom_gate geom/geom_aes.c optional geom_aes geom/geom_bsd.c optional geom_bsd geom/geom_bsd_enc.c optional geom_bsd | geom_part_bsd geom/geom_ccd.c optional ccd | geom_ccd geom/geom_ctl.c standard geom/geom_dev.c standard geom/geom_disk.c standard geom/geom_dump.c standard geom/geom_event.c standard geom/geom_fox.c optional geom_fox geom/geom_flashmap.c optional fdt cfi | fdt nand | fdt mx25l geom/geom_io.c standard geom/geom_kern.c standard geom/geom_map.c optional geom_map geom/geom_mbr.c optional geom_mbr geom/geom_mbr_enc.c optional geom_mbr geom/geom_pc98.c optional geom_pc98 geom/geom_pc98_enc.c optional geom_pc98 geom/geom_redboot.c optional geom_redboot geom/geom_slice.c standard geom/geom_subr.c standard geom/geom_sunlabel.c optional geom_sunlabel geom/geom_sunlabel_enc.c optional geom_sunlabel geom/geom_vfs.c standard geom/geom_vol_ffs.c optional geom_vol geom/journal/g_journal.c optional geom_journal geom/journal/g_journal_ufs.c optional geom_journal geom/label/g_label.c optional geom_label | geom_label_gpt geom/label/g_label_ext2fs.c optional geom_label geom/label/g_label_iso9660.c optional geom_label geom/label/g_label_msdosfs.c optional geom_label 
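# Reminder: "standard" entries (e.g. the geom core files above) are
# compiled into every kernel, whereas "optional" entries are built only
# when the kernel configuration enables them.  For instance, the geom_label
# entries nearby require a config line such as (illustrative):
#
#	options 	GEOM_LABEL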
geom/label/g_label_ntfs.c optional geom_label geom/label/g_label_reiserfs.c optional geom_label geom/label/g_label_ufs.c optional geom_label geom/label/g_label_gpt.c optional geom_label | geom_label_gpt geom/label/g_label_disk_ident.c optional geom_label geom/linux_lvm/g_linux_lvm.c optional geom_linux_lvm geom/mirror/g_mirror.c optional geom_mirror geom/mirror/g_mirror_ctl.c optional geom_mirror geom/mountver/g_mountver.c optional geom_mountver geom/multipath/g_multipath.c optional geom_multipath geom/nop/g_nop.c optional geom_nop geom/part/g_part.c standard geom/part/g_part_if.m standard geom/part/g_part_apm.c optional geom_part_apm geom/part/g_part_bsd.c optional geom_part_bsd geom/part/g_part_bsd64.c optional geom_part_bsd64 geom/part/g_part_ebr.c optional geom_part_ebr geom/part/g_part_gpt.c optional geom_part_gpt geom/part/g_part_ldm.c optional geom_part_ldm geom/part/g_part_mbr.c optional geom_part_mbr geom/part/g_part_pc98.c optional geom_part_pc98 geom/part/g_part_vtoc8.c optional geom_part_vtoc8 geom/raid/g_raid.c optional geom_raid geom/raid/g_raid_ctl.c optional geom_raid geom/raid/g_raid_md_if.m optional geom_raid geom/raid/g_raid_tr_if.m optional geom_raid geom/raid/md_ddf.c optional geom_raid geom/raid/md_intel.c optional geom_raid geom/raid/md_jmicron.c optional geom_raid geom/raid/md_nvidia.c optional geom_raid geom/raid/md_promise.c optional geom_raid geom/raid/md_sii.c optional geom_raid geom/raid/tr_concat.c optional geom_raid geom/raid/tr_raid0.c optional geom_raid geom/raid/tr_raid1.c optional geom_raid geom/raid/tr_raid1e.c optional geom_raid geom/raid/tr_raid5.c optional geom_raid geom/raid3/g_raid3.c optional geom_raid3 geom/raid3/g_raid3_ctl.c optional geom_raid3 geom/shsec/g_shsec.c optional geom_shsec geom/stripe/g_stripe.c optional geom_stripe contrib/xz-embedded/freebsd/xz_malloc.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_crc32.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_bcj.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_lzma2.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_stream.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" geom/uzip/g_uzip.c optional geom_uzip geom/uzip/g_uzip_lzma.c optional geom_uzip geom/uzip/g_uzip_wrkthr.c optional geom_uzip geom/uzip/g_uzip_zlib.c optional geom_uzip geom/vinum/geom_vinum.c optional geom_vinum geom/vinum/geom_vinum_create.c optional geom_vinum geom/vinum/geom_vinum_drive.c optional geom_vinum geom/vinum/geom_vinum_plex.c optional geom_vinum geom/vinum/geom_vinum_volume.c optional geom_vinum geom/vinum/geom_vinum_subr.c optional geom_vinum geom/vinum/geom_vinum_raid5.c optional geom_vinum geom/vinum/geom_vinum_share.c optional geom_vinum geom/vinum/geom_vinum_list.c optional 
geom_vinum geom/vinum/geom_vinum_rm.c optional geom_vinum geom/vinum/geom_vinum_init.c optional geom_vinum geom/vinum/geom_vinum_state.c optional geom_vinum geom/vinum/geom_vinum_rename.c optional geom_vinum geom/vinum/geom_vinum_move.c optional geom_vinum geom/vinum/geom_vinum_events.c optional geom_vinum geom/virstor/binstream.c optional geom_virstor geom/virstor/g_virstor.c optional geom_virstor geom/virstor/g_virstor_md.c optional geom_virstor geom/zero/g_zero.c optional geom_zero fs/ext2fs/ext2_alloc.c optional ext2fs fs/ext2fs/ext2_balloc.c optional ext2fs fs/ext2fs/ext2_bmap.c optional ext2fs fs/ext2fs/ext2_extents.c optional ext2fs fs/ext2fs/ext2_inode.c optional ext2fs fs/ext2fs/ext2_inode_cnv.c optional ext2fs fs/ext2fs/ext2_hash.c optional ext2fs fs/ext2fs/ext2_htree.c optional ext2fs fs/ext2fs/ext2_lookup.c optional ext2fs fs/ext2fs/ext2_subr.c optional ext2fs fs/ext2fs/ext2_vfsops.c optional ext2fs fs/ext2fs/ext2_vnops.c optional ext2fs # isa/isa_if.m standard isa/isa_common.c optional isa isa/isahint.c optional isa isa/pnp.c optional isa isapnp isa/pnpparse.c optional isa isapnp fs/cd9660/cd9660_bmap.c optional cd9660 fs/cd9660/cd9660_lookup.c optional cd9660 fs/cd9660/cd9660_node.c optional cd9660 fs/cd9660/cd9660_rrip.c optional cd9660 fs/cd9660/cd9660_util.c optional cd9660 fs/cd9660/cd9660_vfsops.c optional cd9660 fs/cd9660/cd9660_vnops.c optional cd9660 fs/cd9660/cd9660_iconv.c optional cd9660_iconv kern/bus_if.m standard kern/clock_if.m standard kern/cpufreq_if.m standard kern/device_if.m standard kern/imgact_binmisc.c optional imagact_binmisc kern/imgact_elf.c standard kern/imgact_elf32.c optional compat_freebsd32 kern/imgact_shell.c standard kern/inflate.c optional gzip kern/init_main.c standard kern/init_sysent.c standard kern/ksched.c optional _kposix_priority_scheduling kern/kern_acct.c standard kern/kern_alq.c optional alq kern/kern_clock.c standard kern/kern_condvar.c standard kern/kern_conf.c standard kern/kern_cons.c standard kern/kern_cpu.c standard kern/kern_cpuset.c standard kern/kern_context.c standard kern/kern_descrip.c standard kern/kern_dtrace.c optional kdtrace_hooks kern/kern_dump.c standard kern/kern_environment.c standard kern/kern_et.c standard kern/kern_event.c standard kern/kern_exec.c standard kern/kern_exit.c standard kern/kern_fail.c standard kern/kern_ffclock.c standard kern/kern_fork.c standard kern/kern_gzio.c optional gzio kern/kern_hhook.c standard kern/kern_idle.c standard kern/kern_intr.c standard kern/kern_jail.c standard kern/kern_khelp.c standard kern/kern_kthread.c standard kern/kern_ktr.c optional ktr kern/kern_ktrace.c standard kern/kern_linker.c standard kern/kern_lock.c standard kern/kern_lockf.c standard kern/kern_lockstat.c optional kdtrace_hooks kern/kern_loginclass.c standard kern/kern_malloc.c standard kern/kern_mbuf.c standard kern/kern_mib.c standard kern/kern_module.c standard kern/kern_mtxpool.c standard kern/kern_mutex.c standard kern/kern_ntptime.c standard kern/kern_numa.c standard kern/kern_osd.c standard kern/kern_physio.c standard kern/kern_pmc.c standard kern/kern_poll.c optional device_polling kern/kern_priv.c standard kern/kern_proc.c standard kern/kern_procctl.c standard kern/kern_prot.c standard kern/kern_racct.c standard kern/kern_rangelock.c standard kern/kern_rctl.c standard kern/kern_resource.c standard kern/kern_rmlock.c standard kern/kern_rwlock.c standard kern/kern_sdt.c optional kdtrace_hooks kern/kern_sema.c standard kern/kern_sendfile.c standard kern/kern_sharedpage.c standard kern/kern_shutdown.c 
standard kern/kern_sig.c standard kern/kern_switch.c standard kern/kern_sx.c standard kern/kern_synch.c standard kern/kern_syscalls.c standard kern/kern_sysctl.c standard kern/kern_tc.c standard kern/kern_thr.c standard kern/kern_thread.c standard kern/kern_time.c standard kern/kern_timeout.c standard kern/kern_umtx.c standard kern/kern_uuid.c standard kern/kern_xxx.c standard kern/link_elf.c standard kern/linker_if.m standard kern/md4c.c optional netsmb kern/md5c.c standard kern/p1003_1b.c standard kern/posix4_mib.c standard kern/sched_4bsd.c optional sched_4bsd kern/sched_ule.c optional sched_ule kern/serdev_if.m standard kern/stack_protector.c standard \ compile-with "${NORMAL_C:N-fstack-protector*}" kern/subr_acl_nfs4.c optional ufs_acl | zfs kern/subr_acl_posix1e.c optional ufs_acl kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_bus.c standard kern/subr_bus_dma.c standard kern/subr_bufring.c standard kern/subr_capability.c standard kern/subr_clock.c standard kern/subr_counter.c standard kern/subr_devstat.c standard kern/subr_disk.c standard kern/subr_eventhandler.c standard kern/subr_fattime.c standard kern/subr_firmware.c optional firmware kern/subr_gtaskqueue.c standard kern/subr_hash.c standard kern/subr_hints.c standard kern/subr_kdb.c standard kern/subr_kobj.c standard kern/subr_lock.c standard kern/subr_log.c standard kern/subr_mbpool.c optional libmbpool kern/subr_mchain.c optional libmchain kern/subr_module.c standard kern/subr_msgbuf.c standard kern/subr_param.c standard kern/subr_pcpu.c standard kern/subr_pctrie.c standard kern/subr_power.c standard kern/subr_prf.c standard kern/subr_prof.c standard kern/subr_rman.c standard kern/subr_rtc.c standard kern/subr_sbuf.c standard kern/subr_scanf.c standard kern/subr_sglist.c standard kern/subr_sleepqueue.c standard kern/subr_smp.c standard kern/subr_stack.c optional ddb | stack | ktr kern/subr_taskqueue.c standard kern/subr_terminal.c optional vt kern/subr_trap.c standard kern/subr_turnstile.c standard kern/subr_uio.c standard kern/subr_unit.c standard kern/subr_vmem.c standard kern/subr_witness.c optional witness kern/sys_capability.c standard kern/sys_generic.c standard kern/sys_pipe.c standard kern/sys_procdesc.c standard kern/sys_process.c standard kern/sys_socket.c standard kern/syscalls.c standard kern/sysv_ipc.c standard kern/sysv_msg.c optional sysvmsg kern/sysv_sem.c optional sysvsem kern/sysv_shm.c optional sysvshm kern/tty.c standard kern/tty_compat.c optional compat_43tty kern/tty_info.c standard kern/tty_inq.c standard kern/tty_outq.c standard kern/tty_pts.c standard kern/tty_tty.c standard kern/tty_ttydisc.c standard kern/uipc_accf.c standard kern/uipc_debug.c optional ddb kern/uipc_domain.c standard kern/uipc_mbuf.c standard kern/uipc_mbuf2.c standard kern/uipc_mbufhash.c standard kern/uipc_mqueue.c optional p1003_1b_mqueue kern/uipc_sem.c optional p1003_1b_semaphores kern/uipc_shm.c standard kern/uipc_sockbuf.c standard kern/uipc_socket.c standard kern/uipc_syscalls.c standard kern/uipc_usrreq.c standard kern/vfs_acl.c standard kern/vfs_aio.c standard kern/vfs_bio.c standard kern/vfs_cache.c standard kern/vfs_cluster.c standard kern/vfs_default.c standard kern/vfs_export.c standard kern/vfs_extattr.c standard kern/vfs_hash.c standard kern/vfs_init.c standard kern/vfs_lookup.c standard kern/vfs_mount.c standard kern/vfs_mountroot.c standard kern/vfs_subr.c standard kern/vfs_syscalls.c standard kern/vfs_vnops.c standard # # Kernel GSS-API # gssd.h optional kgssapi \ dependency "$S/kgssapi/gssd.x" 
	\ compile-with	"RPCGEN_CPP='${CPP}' rpcgen -hM $S/kgssapi/gssd.x | grep -v pthread.h > gssd.h" \
	no-obj no-implicit-rule before-depend local \
	clean		"gssd.h"
gssd_xdr.c	optional kgssapi \
	dependency	"$S/kgssapi/gssd.x gssd.h" \
	compile-with	"RPCGEN_CPP='${CPP}' rpcgen -c $S/kgssapi/gssd.x -o gssd_xdr.c" \
	no-implicit-rule before-depend local \
	clean		"gssd_xdr.c"
gssd_clnt.c	optional kgssapi \
	dependency	"$S/kgssapi/gssd.x gssd.h" \
	compile-with	"RPCGEN_CPP='${CPP}' rpcgen -lM $S/kgssapi/gssd.x | grep -v string.h > gssd_clnt.c" \
	no-implicit-rule before-depend local \
	clean		"gssd_clnt.c"
kgssapi/gss_accept_sec_context.c	optional kgssapi
kgssapi/gss_add_oid_set_member.c	optional kgssapi
kgssapi/gss_acquire_cred.c		optional kgssapi
kgssapi/gss_canonicalize_name.c		optional kgssapi
kgssapi/gss_create_empty_oid_set.c	optional kgssapi
kgssapi/gss_delete_sec_context.c	optional kgssapi
kgssapi/gss_display_status.c		optional kgssapi
kgssapi/gss_export_name.c		optional kgssapi
kgssapi/gss_get_mic.c			optional kgssapi
kgssapi/gss_init_sec_context.c		optional kgssapi
kgssapi/gss_impl.c			optional kgssapi
kgssapi/gss_import_name.c		optional kgssapi
kgssapi/gss_names.c			optional kgssapi
kgssapi/gss_pname_to_uid.c		optional kgssapi
kgssapi/gss_release_buffer.c		optional kgssapi
kgssapi/gss_release_cred.c		optional kgssapi
kgssapi/gss_release_name.c		optional kgssapi
kgssapi/gss_release_oid_set.c		optional kgssapi
kgssapi/gss_set_cred_option.c		optional kgssapi
kgssapi/gss_test_oid_set_member.c	optional kgssapi
kgssapi/gss_unwrap.c			optional kgssapi
kgssapi/gss_verify_mic.c		optional kgssapi
kgssapi/gss_wrap.c			optional kgssapi
kgssapi/gss_wrap_size_limit.c		optional kgssapi
kgssapi/gssd_prot.c			optional kgssapi
kgssapi/krb5/krb5_mech.c		optional kgssapi
kgssapi/krb5/kcrypto.c			optional kgssapi
kgssapi/krb5/kcrypto_aes.c		optional kgssapi
kgssapi/krb5/kcrypto_arcfour.c		optional kgssapi
kgssapi/krb5/kcrypto_des.c		optional kgssapi
kgssapi/krb5/kcrypto_des3.c		optional kgssapi
kgssapi/kgss_if.m			optional kgssapi
kgssapi/gsstest.c			optional kgssapi_debug
# These files in libkern/ are those needed by all architectures.  Some
# of the files in libkern/ are only needed on some architectures, e.g.,
# libkern/divdi3.c is needed by i386 but not alpha.  Also, some of these
# routines may be optimized for a particular platform.  In either case,
# the file should be moved to conf/files.<arch> from here.
# libkern/arc4random.c standard libkern/asprintf.c standard libkern/bcd.c standard libkern/bsearch.c standard libkern/crc32.c standard libkern/explicit_bzero.c standard libkern/fnmatch.c standard libkern/iconv.c optional libiconv libkern/iconv_converter_if.m optional libiconv libkern/iconv_ucs.c optional libiconv libkern/iconv_xlat.c optional libiconv libkern/iconv_xlat16.c optional libiconv libkern/inet_aton.c standard libkern/inet_ntoa.c standard libkern/inet_ntop.c standard libkern/inet_pton.c standard libkern/jenkins_hash.c standard libkern/murmur3_32.c standard libkern/mcount.c optional profiling-routine libkern/memcchr.c standard libkern/memchr.c standard libkern/memcmp.c standard libkern/memmem.c optional gdb libkern/qsort.c standard libkern/qsort_r.c standard libkern/random.c standard libkern/scanc.c standard libkern/strcasecmp.c standard libkern/strcat.c standard libkern/strchr.c standard libkern/strcmp.c standard libkern/strcpy.c standard libkern/strcspn.c standard libkern/strdup.c standard libkern/strndup.c standard libkern/strlcat.c standard libkern/strlcpy.c standard libkern/strlen.c standard libkern/strncat.c standard libkern/strncmp.c standard libkern/strncpy.c standard libkern/strnlen.c standard libkern/strrchr.c standard libkern/strsep.c standard libkern/strspn.c standard libkern/strstr.c standard libkern/strtol.c standard libkern/strtoq.c standard libkern/strtoul.c standard libkern/strtouq.c standard libkern/strvalid.c standard libkern/timingsafe_bcmp.c standard libkern/zlib.c optional crypto | geom_uzip | ipsec | \ mxge | netgraph_deflate | \ ddb_ctf | gzio net/altq/altq_cbq.c optional altq net/altq/altq_cdnr.c optional altq net/altq/altq_codel.c optional altq net/altq/altq_hfsc.c optional altq net/altq/altq_fairq.c optional altq net/altq/altq_priq.c optional altq net/altq/altq_red.c optional altq net/altq/altq_rio.c optional altq net/altq/altq_rmclass.c optional altq net/altq/altq_subr.c optional altq net/bpf.c standard net/bpf_buffer.c optional bpf net/bpf_jitter.c optional bpf_jitter net/bpf_filter.c optional bpf | netgraph_bpf net/bpf_zerocopy.c optional bpf net/bridgestp.c optional bridge | if_bridge net/flowtable.c optional flowtable inet | flowtable inet6 net/ieee8023ad_lacp.c optional lagg net/if.c standard net/if_arcsubr.c optional arcnet net/if_atmsubr.c optional atm net/if_bridge.c optional bridge inet | if_bridge inet net/if_clone.c standard net/if_dead.c standard net/if_debug.c optional ddb net/if_disc.c optional disc net/if_edsc.c optional edsc net/if_enc.c optional enc inet | enc inet6 net/if_epair.c optional epair net/if_ethersubr.c optional ether net/if_fddisubr.c optional fddi net/if_fwsubr.c optional fwip net/if_gif.c optional gif inet | gif inet6 | \ netgraph_gif inet | netgraph_gif inet6 net/if_gre.c optional gre inet | gre inet6 net/if_iso88025subr.c optional token net/if_lagg.c optional lagg net/if_loop.c optional loop net/if_llatbl.c standard net/if_me.c optional me inet net/if_media.c standard net/if_mib.c standard net/if_spppfr.c optional sppp | netgraph_sppp net/if_spppsubr.c optional sppp | netgraph_sppp net/if_stf.c optional stf inet inet6 net/if_tun.c optional tun net/if_tap.c optional tap net/if_vlan.c optional vlan net/if_vxlan.c optional vxlan inet | vxlan inet6 net/ifdi_if.m optional ether pci net/iflib.c optional ether pci net/mp_ring.c optional ether net/mppcc.c optional netgraph_mppc_compression net/mppcd.c optional netgraph_mppc_compression net/netisr.c standard net/pfil.c optional ether | inet net/radix.c standard net/radix_mpath.c 
standard
net/raw_cb.c		standard
net/raw_usrreq.c	standard
net/route.c		standard
net/rss_config.c	optional inet rss | inet6 rss
net/rtsock.c		standard
net/slcompress.c	optional netgraph_vjc | sppp | \
			netgraph_sppp
net/toeplitz.c		optional inet rss | inet6 rss
net/vnet.c		optional vimage
net80211/ieee80211.c		optional wlan
net80211/ieee80211_acl.c	optional wlan wlan_acl
net80211/ieee80211_action.c	optional wlan
net80211/ieee80211_adhoc.c	optional wlan \
	compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_ageq.c	optional wlan
net80211/ieee80211_amrr.c	optional wlan | wlan_amrr
net80211/ieee80211_crypto.c	optional wlan \
	compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_crypto_ccmp.c	optional wlan wlan_ccmp
net80211/ieee80211_crypto_none.c	optional wlan
net80211/ieee80211_crypto_tkip.c	optional wlan wlan_tkip
net80211/ieee80211_crypto_wep.c		optional wlan wlan_wep
net80211/ieee80211_ddb.c	optional wlan ddb
net80211/ieee80211_dfs.c	optional wlan
net80211/ieee80211_freebsd.c	optional wlan
net80211/ieee80211_hostap.c	optional wlan \
	compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_ht.c		optional wlan
net80211/ieee80211_hwmp.c	optional wlan ieee80211_support_mesh
net80211/ieee80211_input.c	optional wlan
net80211/ieee80211_ioctl.c	optional wlan
net80211/ieee80211_mesh.c	optional wlan ieee80211_support_mesh \
	compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_monitor.c	optional wlan
net80211/ieee80211_node.c	optional wlan
net80211/ieee80211_output.c	optional wlan
net80211/ieee80211_phy.c	optional wlan
net80211/ieee80211_power.c	optional wlan
net80211/ieee80211_proto.c	optional wlan
net80211/ieee80211_radiotap.c	optional wlan
net80211/ieee80211_ratectl.c	optional wlan
net80211/ieee80211_ratectl_none.c	optional wlan
net80211/ieee80211_regdomain.c	optional wlan
net80211/ieee80211_rssadapt.c	optional wlan wlan_rssadapt
net80211/ieee80211_scan.c	optional wlan
net80211/ieee80211_scan_sta.c	optional wlan
net80211/ieee80211_sta.c	optional wlan \
	compile-with "${NORMAL_C} -Wno-unused-function"
net80211/ieee80211_superg.c	optional wlan ieee80211_support_superg
net80211/ieee80211_scan_sw.c	optional wlan
net80211/ieee80211_tdma.c	optional wlan ieee80211_support_tdma
net80211/ieee80211_vht.c	optional wlan
net80211/ieee80211_wds.c	optional wlan
net80211/ieee80211_xauth.c	optional wlan wlan_xauth
net80211/ieee80211_alq.c	optional wlan ieee80211_alq
netgraph/atm/ccatm/ng_ccatm.c	optional ngatm_ccatm \
	compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/ng_atm.c		optional ngatm_atm
netgraph/atm/ngatmbase.c	optional ngatm_atmbase \
	compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/sscfu/ng_sscfu.c	optional ngatm_sscfu \
	compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/sscop/ng_sscop.c	optional ngatm_sscop \
	compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/atm/uni/ng_uni.c	optional ngatm_uni \
	compile-with "${NORMAL_C} -I$S/contrib/ngatm"
netgraph/bluetooth/common/ng_bluetooth.c	optional netgraph_bluetooth
netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c	optional netgraph_bluetooth_bt3c
netgraph/bluetooth/drivers/h4/ng_h4.c	optional netgraph_bluetooth_h4
netgraph/bluetooth/drivers/ubt/ng_ubt.c	optional netgraph_bluetooth_ubt usb
netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c	optional netgraph_bluetooth_ubtbcmfw usb
netgraph/bluetooth/hci/ng_hci_cmds.c	optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_evnt.c	optional netgraph_bluetooth_hci
netgraph/bluetooth/hci/ng_hci_main.c
optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_sco.c optional netgraph_bluetooth_socket netgraph/netflow/netflow.c optional netgraph_netflow netgraph/netflow/netflow_v9.c optional netgraph_netflow netgraph/netflow/ng_netflow.c optional netgraph_netflow netgraph/ng_UI.c optional netgraph_UI netgraph/ng_async.c optional netgraph_async netgraph/ng_atmllc.c optional netgraph_atmllc netgraph/ng_base.c optional netgraph netgraph/ng_bpf.c optional netgraph_bpf netgraph/ng_bridge.c optional netgraph_bridge netgraph/ng_car.c optional netgraph_car netgraph/ng_cisco.c optional netgraph_cisco netgraph/ng_deflate.c optional netgraph_deflate netgraph/ng_device.c optional netgraph_device netgraph/ng_echo.c optional netgraph_echo netgraph/ng_eiface.c optional netgraph_eiface netgraph/ng_ether.c optional netgraph_ether netgraph/ng_ether_echo.c optional netgraph_ether_echo netgraph/ng_frame_relay.c optional netgraph_frame_relay netgraph/ng_gif.c optional netgraph_gif inet6 | netgraph_gif inet netgraph/ng_gif_demux.c optional netgraph_gif_demux netgraph/ng_hole.c optional netgraph_hole netgraph/ng_iface.c optional netgraph_iface netgraph/ng_ip_input.c optional netgraph_ip_input netgraph/ng_ipfw.c optional netgraph_ipfw inet ipfirewall netgraph/ng_ksocket.c optional netgraph_ksocket netgraph/ng_l2tp.c optional netgraph_l2tp netgraph/ng_lmi.c optional netgraph_lmi netgraph/ng_mppc.c optional netgraph_mppc_compression | \ netgraph_mppc_encryption netgraph/ng_nat.c optional netgraph_nat inet libalias netgraph/ng_one2many.c optional netgraph_one2many netgraph/ng_parse.c optional netgraph netgraph/ng_patch.c optional netgraph_patch netgraph/ng_pipe.c optional netgraph_pipe netgraph/ng_ppp.c optional netgraph_ppp netgraph/ng_pppoe.c optional netgraph_pppoe netgraph/ng_pptpgre.c optional netgraph_pptpgre netgraph/ng_pred1.c optional netgraph_pred1 netgraph/ng_rfc1490.c optional netgraph_rfc1490 netgraph/ng_socket.c optional netgraph_socket netgraph/ng_split.c optional netgraph_split netgraph/ng_sppp.c optional netgraph_sppp netgraph/ng_tag.c optional netgraph_tag netgraph/ng_tcpmss.c optional netgraph_tcpmss netgraph/ng_tee.c optional netgraph_tee netgraph/ng_tty.c optional netgraph_tty netgraph/ng_vjc.c optional netgraph_vjc netgraph/ng_vlan.c optional netgraph_vlan netinet/accf_data.c optional accept_filter_data inet netinet/accf_dns.c optional accept_filter_dns inet netinet/accf_http.c optional accept_filter_http inet netinet/if_atm.c optional atm netinet/if_ether.c optional inet ether netinet/igmp.c optional inet 
netinet/in.c optional inet netinet/in_debug.c optional inet ddb netinet/in_kdtrace.c optional inet | inet6 netinet/ip_carp.c optional inet carp | inet6 carp netinet/in_fib.c optional inet netinet/in_gif.c optional gif inet | netgraph_gif inet netinet/ip_gre.c optional gre inet netinet/ip_id.c optional inet netinet/in_jail.c optional inet netinet/in_mcast.c optional inet netinet/in_pcb.c optional inet | inet6 netinet/in_pcbgroup.c optional inet pcbgroup | inet6 pcbgroup netinet/in_prot.c optional inet | inet6 netinet/in_proto.c optional inet | inet6 netinet/in_rmx.c optional inet netinet/in_rss.c optional inet rss netinet/ip_divert.c optional inet ipdivert ipfirewall netinet/ip_ecn.c optional inet | inet6 netinet/ip_encap.c optional inet | inet6 netinet/ip_fastfwd.c optional inet netinet/ip_icmp.c optional inet | inet6 netinet/ip_input.c optional inet netinet/ip_ipsec.c optional inet ipsec netinet/ip_mroute.c optional mrouting inet netinet/ip_options.c optional inet netinet/ip_output.c optional inet netinet/ip_reass.c optional inet netinet/raw_ip.c optional inet | inet6 netinet/cc/cc.c optional inet | inet6 netinet/cc/cc_newreno.c optional inet | inet6 netinet/sctp_asconf.c optional inet sctp | inet6 sctp netinet/sctp_auth.c optional inet sctp | inet6 sctp netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp netinet/sctp_cc_functions.c optional inet sctp | inet6 sctp netinet/sctp_crc32.c optional inet sctp | inet6 sctp netinet/sctp_indata.c optional inet sctp | inet6 sctp netinet/sctp_input.c optional inet sctp | inet6 sctp netinet/sctp_output.c optional inet sctp | inet6 sctp netinet/sctp_pcb.c optional inet sctp | inet6 sctp netinet/sctp_peeloff.c optional inet sctp | inet6 sctp netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp netinet/sctp_syscalls.c optional inet sctp | inet6 sctp netinet/sctp_sysctl.c optional inet sctp | inet6 sctp netinet/sctp_timer.c optional inet sctp | inet6 sctp netinet/sctp_usrreq.c optional inet sctp | inet6 sctp netinet/sctputil.c optional inet sctp | inet6 sctp netinet/siftr.c optional inet siftr alq | inet6 siftr alq netinet/tcp_debug.c optional tcpdebug netinet/tcp_fastopen.c optional inet tcp_rfc7413 | inet6 tcp_rfc7413 netinet/tcp_hostcache.c optional inet | inet6 netinet/tcp_input.c optional inet | inet6 netinet/tcp_lro.c optional inet | inet6 netinet/tcp_output.c optional inet | inet6 netinet/tcp_offload.c optional tcp_offload inet | tcp_offload inet6 netinet/tcp_pcap.c optional inet tcppcap | inet6 tcppcap netinet/tcp_reass.c optional inet | inet6 netinet/tcp_sack.c optional inet | inet6 netinet/tcp_subr.c optional inet | inet6 netinet/tcp_syncache.c optional inet | inet6 netinet/tcp_timer.c optional inet | inet6 netinet/tcp_timewait.c optional inet | inet6 netinet/tcp_usrreq.c optional inet | inet6 netinet/udp_usrreq.c optional inet | inet6 netinet/libalias/alias.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_db.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_mod.c optional libalias | netgraph_nat netinet/libalias/alias_proxy.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_util.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_sctp.c optional libalias inet | netgraph_nat inet netinet6/dest6.c optional inet6 netinet6/frag6.c optional inet6 netinet6/icmp6.c optional inet6 netinet6/in6.c optional inet6 netinet6/in6_cksum.c optional inet6 netinet6/in6_fib.c optional inet6 netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6 netinet6/in6_ifattach.c optional 
inet6 netinet6/in6_jail.c optional inet6 netinet6/in6_mcast.c optional inet6 netinet6/in6_pcb.c optional inet6 netinet6/in6_pcbgroup.c optional inet6 pcbgroup netinet6/in6_proto.c optional inet6 netinet6/in6_rmx.c optional inet6 netinet6/in6_rss.c optional inet6 rss netinet6/in6_src.c optional inet6 netinet6/ip6_fastfwd.c optional inet6 netinet6/ip6_forward.c optional inet6 netinet6/ip6_gre.c optional gre inet6 netinet6/ip6_id.c optional inet6 netinet6/ip6_input.c optional inet6 netinet6/ip6_mroute.c optional mrouting inet6 netinet6/ip6_output.c optional inet6 netinet6/ip6_ipsec.c optional inet6 ipsec netinet6/mld6.c optional inet6 netinet6/nd6.c optional inet6 netinet6/nd6_nbr.c optional inet6 netinet6/nd6_rtr.c optional inet6 netinet6/raw_ip6.c optional inet6 netinet6/route6.c optional inet6 netinet6/scope6.c optional inet6 netinet6/sctp6_usrreq.c optional inet6 sctp netinet6/udp6_usrreq.c optional inet6 netipsec/ipsec.c optional ipsec inet | ipsec inet6 netipsec/ipsec_input.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mbuf.c optional ipsec inet | ipsec inet6 netipsec/ipsec_output.c optional ipsec inet | ipsec inet6 netipsec/key.c optional ipsec inet | ipsec inet6 netipsec/key_debug.c optional ipsec inet | ipsec inet6 netipsec/keysock.c optional ipsec inet | ipsec inet6 netipsec/xform_ah.c optional ipsec inet | ipsec inet6 netipsec/xform_esp.c optional ipsec inet | ipsec inet6 netipsec/xform_ipcomp.c optional ipsec inet | ipsec inet6 netipsec/xform_tcp.c optional ipsec inet tcp_signature | \ ipsec inet6 tcp_signature netnatm/natm.c optional natm netnatm/natm_pcb.c optional natm netnatm/natm_proto.c optional natm netpfil/ipfw/dn_aqm_codel.c optional inet dummynet netpfil/ipfw/dn_aqm_pie.c optional inet dummynet netpfil/ipfw/dn_heap.c optional inet dummynet netpfil/ipfw/dn_sched_fifo.c optional inet dummynet netpfil/ipfw/dn_sched_fq_codel.c optional inet dummynet netpfil/ipfw/dn_sched_fq_pie.c optional inet dummynet netpfil/ipfw/dn_sched_prio.c optional inet dummynet netpfil/ipfw/dn_sched_qfq.c optional inet dummynet netpfil/ipfw/dn_sched_rr.c optional inet dummynet netpfil/ipfw/dn_sched_wf2q.c optional inet dummynet netpfil/ipfw/ip_dummynet.c optional inet dummynet netpfil/ipfw/ip_dn_io.c optional inet dummynet netpfil/ipfw/ip_dn_glue.c optional inet dummynet netpfil/ipfw/ip_fw2.c optional inet ipfirewall netpfil/ipfw/ip_fw_bpf.c optional inet ipfirewall netpfil/ipfw/ip_fw_dynamic.c optional inet ipfirewall netpfil/ipfw/ip_fw_eaction.c optional inet ipfirewall netpfil/ipfw/ip_fw_log.c optional inet ipfirewall netpfil/ipfw/ip_fw_pfil.c optional inet ipfirewall netpfil/ipfw/ip_fw_sockopt.c optional inet ipfirewall netpfil/ipfw/ip_fw_table.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_algo.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_value.c optional inet ipfirewall netpfil/ipfw/ip_fw_iface.c optional inet ipfirewall netpfil/ipfw/ip_fw_nat.c optional inet ipfirewall_nat netpfil/ipfw/nat64/ip_fw_nat64.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64lsn.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64lsn_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64stl.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64stl_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64_translate.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nptv6/ip_fw_nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 
netpfil/ipfw/nptv6/nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 netpfil/pf/if_pflog.c optional pflog pf inet netpfil/pf/if_pfsync.c optional pfsync pf inet netpfil/pf/pf.c optional pf inet netpfil/pf/pf_if.c optional pf inet netpfil/pf/pf_ioctl.c optional pf inet netpfil/pf/pf_lb.c optional pf inet netpfil/pf/pf_norm.c optional pf inet netpfil/pf/pf_osfp.c optional pf inet netpfil/pf/pf_ruleset.c optional pf inet netpfil/pf/pf_table.c optional pf inet netpfil/pf/in4_cksum.c optional pf inet netsmb/smb_conn.c optional netsmb netsmb/smb_crypt.c optional netsmb netsmb/smb_dev.c optional netsmb netsmb/smb_iod.c optional netsmb netsmb/smb_rq.c optional netsmb netsmb/smb_smb.c optional netsmb netsmb/smb_subr.c optional netsmb netsmb/smb_trantcp.c optional netsmb netsmb/smb_usr.c optional netsmb nfs/bootp_subr.c optional bootp nfscl nfs/krpc_subr.c optional bootp nfscl nfs/nfs_diskless.c optional nfscl nfs_root nfs/nfs_fha.c optional nfsd nfs/nfs_lock.c optional nfscl | nfslockd | nfsd nfs/nfs_nfssvc.c optional nfscl | nfsd nlm/nlm_advlock.c optional nfslockd | nfsd nlm/nlm_prot_clnt.c optional nfslockd | nfsd nlm/nlm_prot_impl.c optional nfslockd | nfsd nlm/nlm_prot_server.c optional nfslockd | nfsd nlm/nlm_prot_svc.c optional nfslockd | nfsd nlm/nlm_prot_xdr.c optional nfslockd | nfsd nlm/sm_inter_xdr.c optional nfslockd | nfsd # Linux Kernel Programming Interface compat/linuxkpi/common/src/linux_kmod.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_compat.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_pci.c optional compat_linuxkpi pci \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_idr.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_radix.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_usb.c optional compat_linuxkpi usb \ compile-with "${LINUXKPI_C}" # OpenFabrics Enterprise Distribution (Infiniband) ofed/drivers/infiniband/core/addr.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/agent.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/cache.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" # XXX Mad.c must be ordered before cm.c for sysinit sets to occur in # the correct order. 
ofed/drivers/infiniband/core/mad.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/cm.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/ -Wno-unused-function" ofed/drivers/infiniband/core/cma.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/device.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/fmr_pool.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/iwcm.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/mad_rmpp.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/multicast.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/packer.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/peer_mem.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/sa_query.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/smi.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/sysfs.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/ucm.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/ucma.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/ud_header.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/umem.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/user_mad.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/uverbs_cmd.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/uverbs_main.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/uverbs_marshall.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/core/verbs.c optional ofed \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/core/" ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_fs.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_vlan.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c optional sdp inet \ compile-with "${OFED_C} 
-I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_main.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_rx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mcg.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cm.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_ah.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_doorbell.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mad.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_main.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_exp.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mr.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_qp.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_srq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_wc.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_alloc.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_catas.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cmd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_eq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_fw.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_icm.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_intf.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_main.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_mcg.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_mr.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_pd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_port.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_profile.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_qp.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_reset.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_sense.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_srq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_resource_tracker.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_sys_tune.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_cq.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_main.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_netdev.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_port.c optional mlx4en 
pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_resources.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_rx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_tx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_flow_table.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_health.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mad.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_main.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mcg.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mr.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pagealloc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_port.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_qp.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_srq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_transobj.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_uar.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_vport.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_ethtool.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_main.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_tx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_flow_table.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_rx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_txrx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_allocator.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_av.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_catas.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_cmd.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_cq.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_eq.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_mad.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_main.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_mcg.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_memfree.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_mr.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_pd.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_profile.c optional mthca \ compile-with "${OFED_C}" 
ofed/drivers/infiniband/hw/mthca/mthca_provider.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_qp.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_reset.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_srq.c optional mthca \ compile-with "${OFED_C}" ofed/drivers/infiniband/hw/mthca/mthca_uar.c optional mthca \ compile-with "${OFED_C}" # crypto support opencrypto/cast.c optional crypto | ipsec opencrypto/criov.c optional crypto | ipsec opencrypto/crypto.c optional crypto | ipsec opencrypto/cryptodev.c optional cryptodev opencrypto/cryptodev_if.m optional crypto | ipsec opencrypto/cryptosoft.c optional crypto | ipsec opencrypto/cryptodeflate.c optional crypto | ipsec opencrypto/gmac.c optional crypto | ipsec opencrypto/gfmult.c optional crypto | ipsec opencrypto/rmd160.c optional crypto | ipsec opencrypto/skipjack.c optional crypto | ipsec opencrypto/xform.c optional crypto | ipsec rpc/auth_none.c optional krpc | nfslockd | nfscl | nfsd rpc/auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/authunix_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_bck.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_rc.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_vc.c optional krpc | nfslockd | nfscl | nfsd rpc/getnetconfig.c optional krpc | nfslockd | nfscl | nfsd rpc/replay.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_callmsg.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_clnt.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/svc.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_vc.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcsec_gss/rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_conf.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_misc.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_prot.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/svc_rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi security/audit/audit.c optional audit security/audit/audit_arg.c optional audit security/audit/audit_bsm.c optional audit security/audit/audit_bsm_klib.c optional audit security/audit/audit_pipe.c optional audit security/audit/audit_syscalls.c standard security/audit/audit_trigger.c optional audit security/audit/audit_worker.c optional audit security/audit/bsm_domain.c optional audit security/audit/bsm_errno.c optional audit security/audit/bsm_fcntl.c optional audit security/audit/bsm_socket_type.c optional audit security/audit/bsm_token.c optional audit security/mac/mac_audit.c optional mac audit security/mac/mac_cred.c optional mac security/mac/mac_framework.c optional mac security/mac/mac_inet.c optional mac inet | mac inet6 security/mac/mac_inet6.c optional mac inet6 security/mac/mac_label.c optional mac security/mac/mac_net.c optional mac security/mac/mac_pipe.c optional mac security/mac/mac_posix_sem.c 
optional mac security/mac/mac_posix_shm.c optional mac security/mac/mac_priv.c optional mac security/mac/mac_process.c optional mac security/mac/mac_socket.c optional mac security/mac/mac_syscalls.c standard security/mac/mac_system.c optional mac security/mac/mac_sysv_msg.c optional mac security/mac/mac_sysv_sem.c optional mac security/mac/mac_sysv_shm.c optional mac security/mac/mac_vfs.c optional mac security/mac_biba/mac_biba.c optional mac_biba security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended security/mac_bsdextended/ugidfw_system.c optional mac_bsdextended security/mac_bsdextended/ugidfw_vnode.c optional mac_bsdextended security/mac_ifoff/mac_ifoff.c optional mac_ifoff security/mac_lomac/mac_lomac.c optional mac_lomac security/mac_mls/mac_mls.c optional mac_mls security/mac_none/mac_none.c optional mac_none security/mac_partition/mac_partition.c optional mac_partition security/mac_portacl/mac_portacl.c optional mac_portacl security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids security/mac_stub/mac_stub.c optional mac_stub security/mac_test/mac_test.c optional mac_test teken/teken.c optional sc | vt ufs/ffs/ffs_alloc.c optional ffs ufs/ffs/ffs_balloc.c optional ffs ufs/ffs/ffs_inode.c optional ffs ufs/ffs/ffs_snapshot.c optional ffs ufs/ffs/ffs_softdep.c optional ffs ufs/ffs/ffs_subr.c optional ffs ufs/ffs/ffs_tables.c optional ffs ufs/ffs/ffs_vfsops.c optional ffs ufs/ffs/ffs_vnops.c optional ffs ufs/ffs/ffs_rawread.c optional ffs directio ufs/ffs/ffs_suspend.c optional ffs ufs/ufs/ufs_acl.c optional ffs ufs/ufs/ufs_bmap.c optional ffs ufs/ufs/ufs_dirhash.c optional ffs ufs/ufs/ufs_extattr.c optional ffs ufs/ufs/ufs_gjournal.c optional ffs UFS_GJOURNAL ufs/ufs/ufs_inode.c optional ffs ufs/ufs/ufs_lookup.c optional ffs ufs/ufs/ufs_quota.c optional ffs ufs/ufs/ufs_vfsops.c optional ffs ufs/ufs/ufs_vnops.c optional ffs vm/default_pager.c standard vm/device_pager.c standard vm/phys_pager.c standard vm/redzone.c optional DEBUG_REDZONE vm/sg_pager.c standard vm/swap_pager.c standard vm/uma_core.c standard vm/uma_dbg.c standard vm/memguard.c optional DEBUG_MEMGUARD vm/vm_fault.c standard vm/vm_glue.c standard vm/vm_init.c standard vm/vm_kern.c standard vm/vm_map.c standard vm/vm_meter.c standard vm/vm_mmap.c standard vm/vm_object.c standard vm/vm_page.c standard vm/vm_pageout.c standard vm/vm_pager.c standard vm/vm_phys.c standard vm/vm_radix.c standard vm/vm_reserv.c standard vm/vm_domain.c standard vm/vm_unix.c standard vm/vnode_pager.c standard xen/features.c optional xenhvm xen/xenbus/xenbus_if.m optional xenhvm xen/xenbus/xenbus.c optional xenhvm xen/xenbus/xenbusb_if.m optional xenhvm xen/xenbus/xenbusb.c optional xenhvm xen/xenbus/xenbusb_front.c optional xenhvm xen/xenbus/xenbusb_back.c optional xenhvm xen/xenmem/xenmem_if.m optional xenhvm xdr/xdr.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_array.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_mbuf.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_mem.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_reference.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_sizeof.c optional krpc | nfslockd | nfscl | nfsd Index: projects/clang400-import/sys/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu =================================================================== --- projects/clang400-import/sys/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu (nonexistent) +++ projects/clang400-import/sys/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu (revision 312720) @@ -0,0 +1,711 @@ +begin 644 rtwn-rtl8192eufw.fw.uu 
+MX9(0`!,````#,1%`*GP``+@0```````````````````"24T"7_4````````` +M`````````F`Y``````````````````)P*``````````````````````````` +M`````````````E_V```````":$@```````)OZ@WP_P\````-\`\`````#?#_ +M#P````WP#P`````0\/\/````$/`/`````/4/``````#P#P``````#0`````` +M`!#P__\````0\#\`````"@@#`P`$"0<#`P`$"`8#`@`$"`4#`0`$#0H'!0`( +M#`H'!``("PH&!0`("PH%`P`("PH#`@`(%!(,!``0%!()!``0)"(<$@`@)"(8 +M#``@)"(4!@`@)"(/!``@)"$*!``@(R$,!``@(Q\*!``@(A\/!``@(1\6#``@ +M,2\@%``P,2\8$``P,2P8#``P,2H4#``P,2@4```P,204```P,1X4```P`@(" +M!0($!0<'!P@*`0($!@<*"PT"`P0'"`L-#P4%!P<("PT/!04'!P@+#0\!`@,& +M!PH+#`T.`@,$!P@+#`T.$`4%!P<("PT/#P\%!0<'"`L-#P\/!`0$!0<'"0D, +M#A`2!`4&!PL-$1,$!08'"PT1$PD)"0D,#A$3"0D)"0P.$1,$"`D*#`X0$A,4 +M!`@*#`X0$1,4%@D)"0D,#A$3$Q,)"0D)#`X1$Q,3````````````)"8J```` +M'R$E)R@`````(R8H*@`````C)B@J`````",F*"H`````("4G*2DJ`````"`E +M)RDI*@`````C)B@J*BH````?(R8H*BHJ````!`````0````(````$````!@` +M```D````,````$@```!@````D````,````#8````/````&0```!X````H``` +M`/````%````!D````>````"@````\````4````&0```"6````R````9````' +MT````,@```$8```!X````M````/H```$L```!D````?0````R````1@```'@ +M```"T````^@```2P```&0```!]`````\````9````'@```"@````\````4`` +M``&0```!X```!+````E@````R````1@```'@```"T````^@```?0```+N``` +M$X@``!=P```?0````,@```$8```!X````M````/H```$L```!D````?0```' +MT```!]````#(```!&````>````+0```#Z```!+````9````'T```!]````?0 +M``(``@`$``@`#``2`!@`)``P`$@`8`!L`!X`,@`\`%``>`"@`,@`\`!0`'@` +MH`#(`2P!D`.$!10`9`",`/`!:`'T`E@#(`/H`&0`C`#P`6@!]`)8`R`#Z``> +M`#(`/`!0`'@`H`#(`/`"6`2P`&0`C`#P`6@!]`/H!=P)Q`NX#Z``9`",`/`! +M:`'T`E@#(`/H`^@#Z`!D`(P`\`%H`?0"6`,@`^@#Z`/H`@0&"`H,$!@@,$!0 +M`@("`@("`P,$!`4%`0$"`@0$!04"`@,#!04&!@4&!@<'"`D*!08&!P<("0H! +M`0("!`0%!08'`@(#`P4%!@8("04&!@<'"`D*"@L%!@8'!P@)"@H+`0$!`0$" +M`P0%!@<(`0(#!`4&!P@%!@8'"`H+#`4&!@<("@L,!08'"`D*"PP%!@8'"`D+ +M#`P,!08&!P@)"PP,#`4&!P@)"@L,#`P%!@<("0H+#`P,&08$`@`8%`T5#A4/ +M%A`7$1@2&!,8'!4.%A`7$1@2&1P:'!L<%`P4#0X4%0\6$!<1$A<,'`X4%0\6 +M$!,7&!,9&!H9```````````````````````````````````````````````` +M``````````````````````````````````````#"KX#^,A)%=(70"W70"*K@ +MPHSEBB1G]8KEC#1Y]8S2C.PDA_CFO`("=/_#E8&T0`!`SGD#>(`6Y@AP"\*O +MYC#A`T08]M*O"-GMZHO0(N4,_R,D@?@/"`B_`P1_`'B!YC#D\@#E#,.?4"`% +M#'2&)0SXYOVF@0CFK@R^`@)T_\WXZ&U@X`CFP."`]N4,TY]`)^4,)(?XYJX, +MO@("=/_]&.;-^.6!;6`&T.#V&(#UY0PDALCV%0R`T^4,(R2!^'\$PJ_F,.`# +M$.(,?P`PX0(#V +M"/8(W_IX@78PD$EM=`&3P.#DD\#@0XD!=8I@=8QYTHS2KR("[].4`D`#?_\B +M=($O+_CF(.7TPJ_F1##VTJ^N#.[#GU`A#G2&+OCF^0CF&+X"`G3__>UI8`D) +MYQD9]PD)@/,6%H#:[M.?0`0%@06![M.?0")TAB[X".;Y[K4,`JF!&`8&YOWM +M:6`)&1GG"0GW&8#S'H#9[R2&^.8$^.\O!)!);9/V".\OD_9_`"+OTY0"0`-_ +M_R+O(R2!^.8PY?3"K^94C/;2K^4,M0<*=(8O^.;U@0)%O5`N=(Q(_"+KG_7PZIY"\.F=0O#HG$7P +M(N#\H^#]H^#^H^#_(N23_'0!D_UT`I/^=`.3_R+@^*/@^:/@^J/@^R+DD_AT +M`9/Y=`*3^G0#D_LBI"6"]8+E\#6#]8,BX/NCX/JCX/DBZ_"CZO"CZ?`BT(/0 +M@OCDDW`2=`&3<`VCHY/X=`&3]8*(@^1S=`*3:&#OHZ.C@-\"1DWDDZ/XY).C +M0`/V@`'R"-_T@"GDDZ/X5``O@))"`!!I=X`0:7?`$&EX`!! +MI><`0:7H`$&EZ0!!I@\`3#A07%A;TQ"O`*O;I'J0>)T$R5N]8+D-)SU@^#[ +MY/W_4?IU\`7E;I"7EA&)X!,35`/[#>3_4?IU\`7E;I"7EA&)X,03$Q-4`?L- +MY/]1^G7P!>5ND)>6$8G@Q%0#^PWD_U'Z=?`%Y6Z0EY,1B>#[Y/T/4?IU\`7E +M;I"7E%'U=?`%Y6Z0EY41B>#$$U0!^PU_`5'Z=?`%Y6Z0EY41B>!4'_L-4?IU +M\`CE;I")`!&)X/OD_0]1^G7P".5ND(D!4?5U\`CE;I")`E'U=?`(Y6Z0B0-1 +M]77P".5ND(D$$8G@^^3]#U'Z=?`(Y6Z0B051]77P".5ND(D&4?5U\`CE;I") +M!Q&)X/L-@#^0I$G@^^3]_U'ZD*1*H^#[#5'ZD*1,X/L-4?J0I$W@5`/[#5'Z +MD*1.H^#[Y/T/4?J0I$[@^PU1^I"D4.#['0]1^G^/<2+O,.`&Y/U_C7'>T-"2 +MKR(1B>#[#>]P!'3P@!;OM`$$=/2`#N^T`@1T^(`&[[0##'3\+?6"Y#0"]8/K +M\"+3$*\!P\#0CX)U@P#@D*8&\'\0?@`2/H>0I@;@_]#0DJ\B?_\24[K3$*\! 
+MP\#0$E?R<=1QR''(D`$`=#_PH^!4_?"0!5/@1"#PD*.[X/\3$Q-4'S#@!I`' +M>'0!\._#$S#@-Y"C_.!@")"E_G0!\(`%Y)"E_O"0H[O@Q!-4!S#@!^20I?_P +M@`:0I?]T`O"0I?X2:9:0!WAT`?"0H[K@8`+D\-#0DJ\B?P)Q(N]$`?U_`G'> +M?P)Q(N]4_OU_`M,0KP'#P-"/@G6#`.WP?Q!^`!(^A]#0DJ\B?U1Q(N4-7_41 +M?U5Q(N4.7_42?U9Q(N4/7_43?U=Q(N407_44K1%_5''>K1)_57'>K1-_5G'> +MK11_5W'>4Y'O(G^!<2+O5/[]?X%QWG^`<2+O1(#]?X!QWA*P4!(^2!*P71*Q +M*7\!$D:%D*0;=`+P_Q)&A9"D&^`$\/&"$G(,?X!Q(N]$0/U_@''>=2C_$E!5 +M$K">?X%Q(N]$!/U_@7'>$K$WY/\"1PZ0HQ3@1!#PD*,AX/U_DW'>D*,8X&`2 +MD`$OX##G!700\(`&D`$O=)#P?PAQ(N]$$/U_"''>?P'Q^G^0<2+O1`']?Y!Q +MWG\4?@`"/H?3$*\!P\#0D*8"[_!_CW$B[S#F2'^-<2+O9`%P/Y"F`_"0I@/@ +M_9"F`N!U\!"0@0`1B>6"+?6"Y#6#]8/@^^3_4?J0I@/@!/#@PY000--_CW$B +M[S#@!N3]?XUQWM#0DJ\BD/UHX/^0_6#@D*44\.\@X`+!YI"EX.!P&'\N<2*0 +MH_CO\'\M<2*0H_GO\)"EX'0!\)"E%.!D%7!LD/UBX/\PYAST5#\$_I"C^.`3 +M$U0_PYZ0I1/PTY0_0!WD\(`9D*/XX!,35#_^[U0_+I"E$_#3E#]``W0_\)"E +M$^#_5##$5`_^[R7@)>!.D*41\.#]?RYQWI"E$^#$5/#_D*/YX%0/3_U_+7'> +MD*44X+0A#)#]8N#_$I``?P31[)"E%."T(P1_`='GD*44X+0G!'\"T>>0I13@ +MM#`,Y/O]?P$2DD-_!-'LD*44X&0T<%"0_6+@,.`XD*/BX/_#$R#@/Y"E$70! +M\/MZI7D1_7\T$EC?D*41=`/PX/\2G\Z0!)WD\.!$`?"0`>?@5/[P@!$2:7>0 +M!)W@5/[PD`'GX$0!\)"E%.#]M#4'D*/$X$0!\.VT-B.0_6'@D*41\)#]8N"0 +MI1+PD*44X/^0I1'@_7L!>J5Y$A)8WY"E%."T-P,2H&:0I13@M$`4D/UBX##@ +M")"CX'0!\(`%Y)"CX/"0_6C@1`'P(A*0&G\$CW)_`A)'EY"AE.!%Y/U_47'>Y/U_4G'>Y/U_4V'>D`$T=/_PH_"C\*/PD`$\ +M\*/PH_"C\/U_5''>??]_57'>??]_5G'>??]_5V'>\1WQ21*P"1*P*,'[\9N0 +MH9GO\/%VD`%D=`'PD`0CX$2`\`(W^'_T<2+O(.4-?_1Q(N]_`2#D!7\"(G\# +M(A*V8'\(<2+O5._]?PAQWN3_\?J0HQ3@5._P(G\!?@`2/@Q_\G$B[R#F#'\% +M<2+O1(#]?P5QWB)]('%$D*,1=`+P(A*FZ(#PD*7P[_#DH_"C\)`!">!_`##G +M`G\!D*7PX&]@/L.0I?+@E(B0I?'@E!-`")`!P.!$$/`BD*7QY'7P`1((UG\4 +M?@`2/H?3D*7RX)0RD*7QX)0`0+>0`<;@,."P(G7H!W6HA2+DD*36\)"DUN!D +M`?`D7)`!Q/!T4*/P$CY[OP$#$C'WD*,7X&`.D*,:X/^0HQG@;V`"$:3"KQ*P +M^+\!`Q*W8]*O$DES$D6]@+V0HP[@D*,9,.`%X/\"MZ[@_WT!@`1]`7\$TQ"O +M`]P`B%^)/YP`B&X)/Y@223\<`(A\R3\8`)!">ZT#@)1J)"C&N!P!'\!4=B0 +MHQK@M`8"47Z0HQK@M`0.D*8.X/]@!/&E@`,2HCR0HQK@9`A@`D$)$KP400F0 +MHQK@<`1_`5'8D*,:X+0&`E%^D*,:X+0.!U$>OP$"4:B0HQK@9`Q@`D$)41[O +M9`%@`D$)<6]!"9"C&N"T#@=1'K\!`E&HD*,:X+0&`E%^D*,:X+0,!U$>OP$" +M<6^0HQK@9`1P7A*Y->]D`7!6$KI`@%&0HQK@M`X'41Z_`0)1J)"C&N"T!@)1 +M?I"C&N"T#`=1'K\!`G%OD*,:X'`$?P%1V)"C&N"T!!L2NF&`%I"C&N"T#`^0 +MHQ3@_Q,35#\PX`,2O`&0HQK@D`&Z\)"C&>"0`;OPT-"2KR*0H[W@,.`3D*/# +MX,035`#3E`1`")`!N'0(\(`(D`&XY/!_`2*0`;ET`O!_`"*0HQ3@ +MD`8$(.`,X$1`\'T$?P%Q`(`/4?B0!2?@5'_PD*,2=`SPY/W_8;J0HQ3@PQ,@ +MX`11_(`>D`8$X$1`\.!$@/!]!'\!<0"0!2?@1(#PD*,2=`3PY/W_8;J0I@WO +M\!)N-I"F#>!@!>3]_W&Z?01_`7$`D*,2=`3P(N!4?_!]#'\!TQ"O`WP?X\22R+O +M,.0QD*8,X!1@!Q1@'20"<".0HQ/@5`'$,S,S5(#_D*,:X%1_3_U_B(`'D*,9 +MX/U_B1)+WM#0DJ\BD**-X&0!<#&0HQ3@5/WP?2Q_;W&Z<<6_`120HQ/@1(#P +M?0Y_`7$`D*,2=`[P(I`!N70!\)`!N`3P(I"D*Q)(B>#_?@#D_=%2Y/W_D`4B +M[_"0H9CM\"*0H\3@1`+P?0A_`=,0KP'#P-"0I:WO\*/M\)"AEN`$\)`$'>!@ +M09`%(N"0I;'P?2;Q[N]D`7`+T;F0I!S@(.`:@!60H\'@Q!,35`,PX`S1N9"D +M'.`@X`,2N&F0I;'@_WTG<;JQW8`.L=W1N9"D'.`@X`,2N&F0H[W@,.`7D*/! 
+MX,03$U0#,.`+D`4BX%1O_WTH<;J0!!]T(/!_`=#0DJ\BD*4%[_"0I0=T`O!_ +M`1)OB9"CP>#_$Q-4/S#@()"E!>"T`@1]!X`)D*4%X+0%!GT-?_]QNO'ROP$# +M$DM)D*/!X,03$Q-4`9`'>##@!70#\(`#=`'PD*4%X+0"#9"CON`D`_^0H\T2 +M7\20H[W@PQ,PX`?DD*4&\(`&D*4&=`'PD*/`X,035`<@X!.0H_S@8`B0I0=T +M`?"`!>20I0?PD*4'X/^0I0;@_1)IF^20H\_PD*4%X/^T`@B0H]#@!/"`">^T +M!07DD*/0\)"CO>#$$U0',.`7D*4%X+0"!'T(@&20I07@9`5P8'T.@%B0H[W@ +MQ%0/,.`KD*/!X,035`<@X`F0HQG@_^3]$;V0I07@M`($?0F`+Y"E!>!D!7`K +M?0^`(Y"C%^!@(9"C&>#_Y/T1O9"E!>"T`@1]"H`)D*4%X+0%!GT0?V]QNI"C +MP.`PX`7D_?]QNI"CP.#$$Q,35`$PX`7D_Q*/V9"CP>##$Y`&S>!4[_"0!L_@ +M5._P(I"EK>#_TQ"O`#_?@!]`=,0KP'#P-"0I:?N\*/O +M\*/M\)`$'>!@+I`%(N"0I:SP?13Q[K\!%!*5?I"EJN[P_*/O\/V0I:G@_]'7 +MD*6LX/]]%7&Z@!02E7Z0I:KN\/RC[_#]D*6IX/_1UY`$'W0@\'\!T-"2KR*0 +MH9S@_Y"EKN#[?0$2E8B0I:_N\/RC[_#]D*6MX/]T"2WU@N0T_/6#X%0_\.]@ +M4G0I+?6"Y#3\]8/@1!#P=`DM]8+D-/SU@^!$@/"0H\'@Q!-4!S#@1Y"CU.#_ +MPY0@4!/O)>`EX/]T*RWU@N0T_/6#[_`B="LM]8+D-/SU@W1_\")T*2WU@N0T +M_/6#X%3O\'0)+?6"Y#3\]8/@1$#P(A*X]9"C&N!D#&`1Y/U_#!&]Y/W_<;I] +M".3_<=`BD*.]X"#@*9"C%^!D`7`A$I?(D*,5X%0/8`[D_7\,$;WD_?]QN@*X +M]9"C&N!P`A&Y(N]@19"BC>!D`7`]D*,4X%3^\'TK?P]QNI`&!.!4O_!]".3_ +M<="_`120HQ/@1$#P?09_`7$`D*,2=`;P(I`!N70!\)`!N'0(\")__W&ZY)"F +M`/"C\)`%^.!P#Z/@<`NCX'`'H^!P`W\!(I"CP>#$$Q-4`S#@%=.0I@'@E`.0 +MI@#@E`!``H`3?P&`&].0I@'@E.B0I@#@E`-`"I`!P.!$(/!_`")_,GX`$CZ' +MD*8`Y'7P`1((UH">Y/OZ_7\!$D>^[V#TD*&4X&#NPJ\PX`M4_O#D_Q)DTQ)Y +MT]*OPJ^0H93@_S#A!E3]\!*.;]*OPJ^0H93@_S#B!53[\#'DTJ_"KY"AE.#_ +M,.8%5+_P$;72KX"RY'L!>J1Y!A';[[0"&)"EWN!D!&`+?T#Q<9"EWN`$\"+D +MD*7>\")]`W\1TQ"O`$2 +M2)6.@G6#`!(&HO]T4B[U@N0T_?6#[_`.@-WNPY0&4#-T4B[U@N0T_?6#Y/`. +M@.ONPY0'4!Z0I>$22)6.@G6#`!(&HO]T42[U@N0T_?6#[_`.@-R0_5AT`?`B +MTQ"O`20HHCPT-"2KR*0I;H22)Z0 +MH^/@1!#P?0%_&Q'?D*6][_"_`1R0H^/@5._P1"#PY)"EI?"C=`KPY/O]?VA^ +M`8!1D*6]X/^T`@Z0I;H22)42!HF0H^;P(N^T!`>0H^/@5._P(I"D'.##$U0' +M_W7P#I"D*1)(B>#^=?`.[Y"D*!)(B>"0I:;PD*6E[O#D^_U_5'X!TQ"O`[PH^_PD*6EX/4[H^#U/!(VG9"EH>#^H^#U@HZ#HZ.C=`7PT-"2KR*0 +MI>022)Z0H^/@1`3P?0%_(Q'?CW#E<+0!')"CX^!4^_!$"/#DD*6E\*-T"O#D +M^_U_:'X!@)CE<+0"#I"EY!)(E1(&B9"CY?`BY7"T!!"0H^/@5/OPY/\2D!I_ +M!/%Q(I"D'.`PX'V0I![@<#Y]%G]O$E.Z$E?RD*0!$`?#DD*6E\*-T`U&WD*0>=`'P(I"D'N!D`7`OD*0< +MX,,35`?_=?`.D*0G$DB)X##@&77P#N\25D;DD*6E\*-T`_#D^_U_5'X!0;]Q +MU2*0I!S@_\,3_N]4\?_N!%0')>!/\*/@_Y"D'.#^PQ-4![4'!.Y4\?!1D.20 +MI![P$E?RD*0#Z=?`.[9"D(!)(B>#\5`/][!,3 +M5`?[[L14#Y"EU?"O`A)V7Y"D'.##$U0'=?`.L5J0I!S@PQ-4!_]U\`Z0I"H2 +M2(G@!/!U\`[O$E.ID*0#_D*77X'7P#I"D(Q)(B<"#P(*0I=C@T(+0@W7P`Q)(B>#^ +M=`&H!@B``L,SV/ST7Y"EV?"0I=G@_Y"EU^!U\`Z0I"$22(G`@\""D*78X-"" +MT(-U\`,22(G@_*/@]8*,@^_PD*78X`3P@8/0T)*O(I"D(!)(B>#^5`/_[A,3 +M5`?]TQ"O`WP[Q1@`L%VD`8#X%3[\)"F">#$,U3@_Y`$0N!4GT__ +M\)"E3!((>0````&0I5`2"'D````!?P!^"-'GD*5,$@AY`````9"E4!((>0`` +M``%_`'X)T>>0I4P2"'D````0D*8)X/_D_/W^[U0#_^1X`1((1W@$$@A:D*50 +M$@AM?P!^"M'GD*5,$@AY```,`)"F">#_Y/S]_N]4`__D>`H2"%J0I5`2"&U_ +M`'X-T>>0I4P2"'D,````D*8)X/_D_/W^[U0#_^1X&A((6I"E4!((;7\8?@C1 +MYY"E3!((>0``#`"0I5`2"'D`````T>.0I3H2"'D```P`D*4^$@AY```$`(!E +MD`8#X$0$\)"E3!((>0````&0I5`2"'D`````?P!^"-'GD*5,$@AY`````9"E +M4!((>0````!_`'X)T>>0I4P2"'D```P`D*50$@AY```,`-'CD*4Z$@AY```, +M`)"E/A((>0``#``2=N#0T)*O(G^$?@C3$*\!P\#0D*5*[O"C[_`2-ZV0I502 +M"&V0I4P22%$2"#J0I5022&T22";`!,`%P`;`!Y"E3!)(49"E4!)(;1)()M`# +MT`+0`=``$D@SD*58$@AMD*58$DA1D*JY$@AMD*5*X/ZCX/\2.*30T)*O(I"C +M%^!@`Q*[MV%0D*07D*&4X/^0I@?@ +M_N].D*&4\"(2MG_3D*1+X)0`D*1*X)0`0!;@_*/@_>R0I:7PH^WPY/O]?UQ^ +M`4&_D`%?Y/`B@-"`HGT!?Q4<,.4" +M$>/E'##F`G'LT`?0!M`%T`30`]`"T`'0`-#0T(+0@]#PT.`RY/5D=($E9/6" +MY#24]8/@_U0#]67O5`03$U0_]69TC25D]8+D-*+U@^!P`D%=Y603$Q-4'Y"D +M]?#E9%0'H_"0I/7@)`'U@N0TE?6#X)"D]_#]D*3VX/]T`7X`J`<(@`7#,\XS 
+MSMCY_^]=<`)!777P$.5DD($!$DB)X"#G`H`3=?`0Y620@0(22(G@D*3X\"#G +M"9`!P>!$(/!!79"D^.`PYCAT@25D]8+D-)3U@^!4^/!U\!#E9)"!`!)(B>#] +M=?`%Y620EY822(G@$Q-4`Y"EGO#D^Z]D46U!7>5E8$45972!)63U@N0TE/6# +MX%3\167_=($E9/6"Y#24]8/O\'7P!>5DD)>6$DB)X!,35`.0I9[P=!0E9/6" +MY#2A]8/@_7L!@$_E9F0!<$WU9G2!)63U@N0TE/6#X%3[_^5F)>`EX$__=($E +M9/6"Y#24]8/O\'7P!>5DD)>6$DB)X!,35`.0I9[P=!0E9/6"Y#2>]8/@_>3[ +MKV2`%:]D$G^*!63E9,.4@%`"`>8BK6&O8-,0KP'#P-"0I9OO\*/M\/F0I9O@ +M_A,3$U0?_>Y4!_YU\!#OD($!$DB)X)"EH/#I5'^0I9_PZW`K=`$M]8+D-)7U +M@\"#P(+@_W0!J`8(@`+#,]C\]%_0@M"#\)"EH.!4?_"`:9"EG^#_PY090`[O +MTY0;4`B0!,]T`O"`!9`$S^3P=`$M]8+D-)7U@\"#P(+@_W0!J`8(@`+#,]C\ +M3]""T(/PD*6;X'7P$)"!`1)(B>!4!_^0I:#PD*6?X)!$19/^,S,S5/A/D*6@ +M\$2`\)"EF^#[<`RCX)"E)_#D_?\2=^&0I9_@^R7@))'U@N0T0_6#Y)/^=`&3 +M_^3\_77P!.N00D$22(D22'D22!EX`1((1Y"EF^#])>`D$O6"Y#26]8/N\*/O +M\)"EG.#_=?`0[9"!`!)(B>_P[7`%D`'([_"0I:#@_Y"EF^#^=?`0D($!$DB) +M[_!U\!#ND($%$DB)X%3\_Y"EGN!/_I"EF^#_=?`0D($%$DB)[O!]`1)^_=#0 +MDJ\BD`!4`Q1@$!1@%B0"0!"W@1`[PY)"D4/"0I/?O\)"D]70"\)"E`Q3P^WJD +M>?7QWG\$`E]QY/^0I/7O\)`$?N#U9:/@]69E96!OD*3V=`/PD*4$=`CPY68$ +M5`_U9^3U9.5G=?`(I"0`]8+D-(#U@^6")63U@N0U@_6#X/]T^"5D]8+D-*3U +M@^_P!63E9+0(T'L!>J1Y]O'>Y68$5`_U9K0/`^3U9I`$?^5F\)"D]>!_!'`# +M`D[L$E]Q(N3_D*12[_#DH_"0I%/@_\.4@%!'=%0O]8+D-*3U@^3P=?`0[Y"! +M`Q)(B>"0I%,PYP[@)('U@N0TD_6#Y/"`%.#_\<^0I%/@)%3U@N0TI/6#=`'P +MD*13X`3P@*]_#'X`$CZ'Y)"D4_"0I%/@_\.4@$`"P2IT5"_U@N0TI/6#X'`" +MP2*0I%/@_W7P$)"!!A)(B>#]=?`0[Y"!!Q)(B>#^[?^0I%/@_"7@)`'U@N0T +MDO6#[O"C[_!U\!#LD($*$DB)X/UU\!#LD($+$DB)X/[M_Y"D4^!U\`J0C0$2 +M2(GN\*/O\'\!D*13X/YU\!"0@0L22(GE@B_U@N0U@_6#X/UU\`KND(T!$DB) +M=?`"[Q)(B>3PH^WP#^^T!#^=),O]8+D-)KU@^[P +MD*13X/^0I%+@_1)^_9"D4^`D@?6"Y#23]8-T`?"0I%/@!/"A."(2K.A_`@). +M[)"C#N`PX!"C=`'PD*,.X/_#$S#@`M'4$KKND*.]X##@!>3_$IP$(M'YD*,= +MX!20!7/P?0)_`M&4@."0HHW@M`$6D*,7X&`0D*,5X%0/9`)@`P*7GA)75R*0 +M`31T0/#]Y/]T%2_XYDW^]G0P+_6"Y#0!]8/N\"*0HQ?@<`>0HP[@,.`1D*,. +MX##@!]'(OP$%X5P25W0BD`5#X'\`,.<"?P$BTQ"O`"T`01_!(`+ +MT!@(Y"C&^!$$/#D +MD*6E\)"C'^"0I:826K>0HQK@(.(#$E"Y$I?((I"C%^!D`F`4D*,5X%0/8`P2 +MIY_O<`;]?PP24+TBD`$V='CPHW0"\'UX_]&4?0)_`]&4D`8*X$0'\)"C(J/@ +MD`58\)"BC>"T`160HQ3@5/OPD*,:X"#B#GT!?P0"4+V0HQ3@1`3P(I"C#N`P +MX`7DH_"C\")U\!#OD($#$DB)X$1`\"+3$*\!P\#0D**(X/]P!J/@9`E@"N\4 +M_Y"BB>"U!P1_`8`"?P#O8`F0`<'@1`+P@#7``9"BB>!U\`^D)/+Y=*$U\*@! 
+M_'T!T`%^`'\/$@9CD**)X`3PX'\`M`H"?P'O8`7DD**)\-#0DJ\BP.#`\,"# +MP(+`T'70`,``P`'``L`#P`3`!<`&P`<2L7?E(3#A`Q)?5>4A,.(",2[E(3#C +M`Q)?NN4A,.4#$E^\Y2(PX`,2LE?E(C#C`Q*0H^4C,.$#$K2CY2,PX`,2M#3E +M(S#B"=&WD`>/X$00\.4C,.,"\>#E)##A!7\$$D[LY20PY`,29EKE)##E`Q*U +M.N4D,.8#$K6"Y20PYP(Q`-`'T`;0!=`$T`/0`M`!T`#0T-""T(/0\-#@,I"C +MV.`PX`0Q&(`#$IITD*0)X##@`M'I(GT2?_\24[J0!WAT`?"0H_S@_^3]@&V0 +MHQ?@8!20!I+@,.$#`KCUD*,3X%3W\!)0I"(Q&)"CV.#_Q!-4!_[OPQ-4#\.> +M0`*`%Y"CV.#_PQ-4#_[O5.'_[@14#R7@3_`BD*/8X%3^\%3A\)"CW>"0!WCP +MD*/>,9;1-N3]_P)3NN#_H^#]TQ"O`MY/_L +MD*63$@AMY6R0I9.T`0@22%'O1`2`!A)(4>]$(/_LD*63$@AMD*63$DA1D*JY +M$@AM?RQ^"1(XI.5M9`)P`D&,?S!^"1(WK>3__OWLD*63$@AMY6UP%Y"EDQ)( +M4>Y$!_[M1'#][)"EDQ((;8!7D*63$DA1[D0&_NU$8/WLD*63$@AM?RQ^"1(W +MK>3_[)"EEQ((;9"C_."0I9=@"!)(4>]$((`&$DA1[T0$_^R0I9<2"&V0I9<2 +M2%&0JKD2"&U_+'X)$CBDD*63$DA1D*JY$@AM?S!^"1(XI-#0DJ\BTQ"O`]4^TW_ +MD*.]\.Y4$/[O5.].__`2!HG^5"#][U3?3?^0H[WP[E1`_N]4OT[_\!(&B52` +M_N]4?TZ0H[WPD``#$@:B_U0!_I"CP.!4_D[^\.]4`O_N5/U/__"0``,2!J+^ +M5`3][U3[3?^0H\#P[E00_N]4[T[_\)```Q(&HOY4(/WO5-]-_Y"CP/#N5$#^ +M[U2_3O_PD``#$@:B5(#^[U1_3I"CP/"0``02!J+_5"#^D*/!X%3?3O[P[U1` +M_^Y4OT__\)``!!(&HOY4@/WO5']-_Y"CP?#N5`'^[U3^3O_PD``$$@:B_E0$ +M_>]4^TW_D*/!\.Y4`O[O5/U.__"0``02!J+^5!#][U3O3?^0H\'P[E0(_N]4 +M]T[PX/\3$U0_(.`(T3;D_?\24[J0H\'@PQ,@X`Z0!LW@5._PD`;/X%3O\)"D +MVQ)(E1(&B2#@`J&$D`54X)"CSO#@PQ.0H\WPD*/`X,14#S#@%I```1(&HI"C +MOO"0``(2!J*0H[_P@$B0``$2!J+_PY0J4!+OPY0#D*.^4`5T`_"`"N_P@`:0 +MH[YT*O"0``(2!J+_PY0J4!+OPY0#D*._4`5T`_"`"N_P@`:0H[]T*O"0H\'@ +MQ!,35`,PX#V0H[[@=?`#A)"CQO#@PQ.C\)"CO^!U\`.$D*/(\)"CON##$Y"C +MR?"0H[_@PQ.0H\KPD`$^=`CP_7\"$G?,Y)"C^O"0I-L22)60``,2!J+$$Q-4 +M`R#@.Y"CO>#_PQ,@X`KOQ!,3$U0!,.`G$@:)$Q,35!\PX`B0H_S@8`B`"Y"C +M_.!@!751`8`#Y/51?0*O43&;D*.]X,14#S#@')"CP>#$$U0',.`'?01_`A)3 +M`)`%`'0<\*-T$?"0!5AT`O"0H\7@_[0!")"CT'0!\(`B[[0$")"CT'0$\(`6 +M[[0&")"CT'0"\(`*[[0'!I"CT'0%\.20H\7P@&V0I-L22)60``,2!J+_Q!,3 +M5`,PX`5U4@*`%!(&B?\3$Q-4'S#@!752`8`#Y/52$G=D?2!_0!)+WI"DVQ)( +ME9```Q(&HA,3$U0?D`=X,.`%=`/P@`-T`?"M4G\",9OD_?\24[J0!0!T'/"C +M=$/PD*/#X%3?\.20H\_PD*/`X,03$Q-4`3#@"9"C[>!$`O"`#'\!$H_9D*/M +MX%3]\'\#\8F0H[W@(.`'D*/!X%2_\-#0DJ\BTQ"O`!$`O"0`0!T +M__"0!K=T"?"0!K1TAO"0H[O@_Q,3$U0?,.`&D`=X=`/PD*/8X"#@-._#$S#@ +M+I"C_.!@!^20I?SP@`:0I?QT`?"0H[O@Q!-4!Y"E_3#@!70!\(`#=`+PD*7\ +M,99_`A)+(N]$`?U_`A)+WM#0DJ\BTQ"O``PX!^0I`[@M`$,H^"T +M`1-T`O#Q8H`,D*0.X+0"!70#\-'IT-"2KR+3$*\!P\#0D*00X+0!`H!*D*00 +MX+0"$?%B?P'QB1*@D9"D$'0#\(!,D*00X&0#<".0I!/Q9>3_\8D2H)&0I`G@ +MPQ-4`__D^_T2ER&0I!!T!/"`(9"D$."T!!J0I`G@PQ-4`_][`7T!$I`PX!?$5`\PX`OOD`;,<`/P@`B``Y`&S'0#\-#0 +MDJ\B(M$VD*,1=`/P(L#@P/#`@\""P-!UT`#``,`!P`+``\`$P`7`!L`'$DU+ +M4Y&_T`?0!M`%T`30`]`"T`'0`-#0T(+0@]#PT.`R,N3_Y/YTDR_U@N0TF_6# +MX%3^\'7P$.^0@0"^`Q(22(GE@B[U@N0U@_6#=(#P@`\22(GE@B[U@N0U@_6# +MY/!U\`COD(D`$DB)Y8(N]8+D-8/U@^3P#KX0K0^_@*?DD*WC\/_D_G7P"N^0 +MC0$22(EU\`+N$DB)Y/"C\`Z^!>=TE"_U@N0TH/6#Y/!T%"_U@N0TH?6#Y/!T +M%"_U@N0TG?6#Y/!T%"_U@N0TGO6#Y/!T@2_U@N0TE/6#Y/!TE"_U@N0TG/6# +MY/!TDR_U@N0TF_6#X%2=\)"5$70!\'24+_6"Y#2?]8/D\'03+_6"Y#2:]8/D +M\'22+_6"Y#25]8-T__#O)>`D`?6"Y#22]8/D\*/P=),O]8+D-)KU@^3P=?`% +M[Y"7DQ)(B70;\'7P!>^0EY022(GD\'7P!>^0EY422(G@5.#P=?`%[Y"7EA)( +MB>!4\_!U\`7OD)>6$DB)X%3\\'7P!>^0EY422(G@1"#P=?`%[Y"7EA)(B>!4 +MS_!U\`7OD)>6$DB)X$1`\'7P!>^0EY822(G@5'_P=?`%[Y"7EQ)(B>!4_O!U +M\`7OD)>4$DB)X/YU\!#OD($`$DB)[O`/[V2`8`(!BI`$273P\*/D\*-T__"0 +M!#-T`O"C=`3PHP3PHP3PHP3P(G&[<<$1*?&;439QSU'JD*1)=/_PY*/PH_"C +M\*/@5/Q$`O#DH_"C\*/P(GX`?Z-]`'L!>J-Y$Q((JI"C%G0"\)"C'13PH_"C +M=`CPD*,BY/"C=`+PD*-.X"0$D*,L\*-T"/#D_?\24P!]#'\"$E,`$E+\D*&9 +MX/^T`0B0HR%TW?"`#^^0HR&T`P5TU?"``W1`\)"CMG0#\*-T!?"CX%0!1"CP 
+MHW0%\)"C3N`D!)"C+/"C=`CP\7I^`'\"?0![`7JC>;H2"*J0!@3@5'_PD`8* +MX%3X\.3]_Q)3NN20H[SP(GX`?QE]`'L!>J-Y[1((JG^`?@@2-ZV0H_02"&V0 +MH_022%&0H_`2"&V0H9G@_V0"<"J0_8#@?@`PX`)^`9"C_.[PD/V`X'X`,.$" +M?@&0H_WN\)#]@."0`OOP@$KO9`%P'9#]<.!_`##@`G\!D*/\[_"0_7#@?P`P +MX0)_`8`CD*&9X&0#<""0_7C@?P`PX`)_`9"C_._PD/UXX'\`,.$"?P&0H_WO +M\)#]:.!$`O"0!WAT`?`2GBKQ9)"CUG0!\.3]?P$2:9N0I`G@5/[PD`2/Y/`B +MY)"BC?`BY)"BB/"C\)"A\/"C\")^`'\M?0![`7JD>1P""*J+48I2B5,2!HG$ +M5`__OP\:D*0K4:I2J5.0``$2!J+_ +M$@:)_E0/_77P#I"D'Q)(B>_PD``"$@:B5`/_=?`.[9"D(!)(B>!4_$_PD``" +M$@:B5!S_[E0/_G7P#I"D(!)(B>!4XT_PD``"$@:B5.#_=?`.[I"D(!)(B>!4 +M'T_PD``$$@:B_Q(&B50/_>3[L02K4:I2J5.0``42!J+_$@:)5`_]>P&Q!*M1 +MJE*I4Y```Q(&HC,S,U3X_Q(&B?Y4#_UU\`Z0I"@22(GO\)```Q(&HL035`?_ +M=?`.[9"D*1)(B>_P[L14#_\4;7`ED*0=[_"0``82!J)4#\14\/^0I!S@5`]/ +M\%3Q\$0!\'T@Y/_QS"*/5(U5K@-T'\.55$`4D*3?[O"K5>3]L9>0I-OO\"34 +M@%5T/\.55$`6D*3?[O"K57T@KU2QEY"DV^_P)(B`.'1?PY540!:0I-_N\*M5 +M?4"O5+&7D*3;[_`DT(`;='_#E51`,)"DW^[PJU5]8*]4L9>0I-OO\"2$_>0T +M!/QU\`[E59"D(1)(B77P`^X22(GL\*/M\"+#[YWU5L.4"%`DY/57=?`.ZY"D +M(Q)(B<"#P(*0I-_@T(+0@W7P`Q)(B>56\(!&Y5;#E!!0"757`>56)/B`%^56 +MPY084`EU5P+E5B3P@`=U5P/E5B3H_W7P#NN0I",22(G`@\""D*3?X-""T(-U +M\`,22(GO\*]7(H]4?1<25^YU\`[E5)"D'Q)(B>#\=?`.Y520I"`22(G@_E0# +M_>X3$U0'^Y"D'.#^Q%0/D*75\*\$T5]U\`[E5!)=6G7P#N54$E.IK53D_P)< +M<=,0KP'#P-"0I=+O\.UD`7`OZ[0!!^`D`O5S@`B0I=+@)/[U0`` +M`/^O<]'6D*4Z$@AY````_Z]S@""0I3H2"'D```#_D*72X/_1UI"E.A((>0`` +M`/^0I=+@_^3\_?Z0I3X2"&U]&'P`?P'1YM#0DJ\BY/S]_I"E/A((;7T8?`#D +M_],0KP'#P-"0I3CL\*/M\)"E-^_PHZ/@_1(^.9"E0A((;9"E.A)(41((.I"E +M0A)(;1)()L`$P`7`!L`'D*4Z$DA1D*4^$DAM$D@FT`/0`M`!T``22#.0I482 +M"&V0I3BCX/W`!9"E1A)(49"JEA((;9"E-^#_T`42/3G0T)*O(GX`?PI]`'L! +M>J-YXQ((JI"CT70"\"+D_W0O+_6"Y#2C]8/D\`_OM!GOY)"C*O"0HR[PD*,H +M\")^`'\!?0![`7JC>0X2"*J0HP[@5/WPY*/PH_"C\*-T#/`B[Q20!7/PD`$_ +M=!#P_7\#=!TO^.9-_O9T."_U@N0T`?6#[O`BD*4FZ_!P9)"E)N#^)!/U@N0T +MF_6#X/R0I2?@^^QK8$N0I2KK\*/N\*X%[B7@3_^0E1'@_B7@)>!/D*4L\)"E +M*'0,\)"E-G0$\'L!>J5Y*!)GWG\$$E]QD*4GX/^0I2;@)!/U@N0TF_6#[_`B +MCV!U\`7OD)>7$DB)X%0!^W7P$.5@D($`$DB)X/5A5'_Z=?`%Y6"0EY,22(G@ +M^9"DY?!U\`3JD$)!$DB)$DA=Y6`EX"02]8+D-);U@^[PH^_P=?`%Y6"0EY82 +M2(G@$Q-4`_5CZL.90`(A5704)6#U@N0TH/6#ZO!T%"5@]8+D-*#U@^#U8L.4 +M#$`EY6*4&U`?NP$H$_9"D +MY>#_[=.?0`(AQ>T3$Q-4'_]U\`CE8)")`!)(B>6"+_6"Y#6#]8/@]8)U@P#M +M5`?_=`%^`*@'"(`%PS/.,\[8^?_N58/^[U6"3F`&J@6*88!S#8"JD*3EX&IP +M4'04)6#U@N0TH/6#Y6'P=?`%Y6"0EY422(G@_\035`51D)>6$DB)X,03$Q-4`3#@`L'Q=($E4?6"Y#24]8/@5/CP=?`% +MY5&0EY422(G@5!^0I./P=?`%Y5&0EY822(G@$Q-4`Y"DY/#E427@)`'U@N0T +MDO6#X/ZCX-.4`.Z4`%`"P?'E477P"J0D`?ETC37P^GL!BU;U5XE8Y5$EX"0! 
+M]8+D-)+U@^#U6Z/@]5QTDR51]8+D-)KU@^#_D*38Y/"C[_"0``(2!ZO_KO`2 +M!X`O_^7P/OZ0``02!ZLO_^XU\/Z0``82!ZLO_^XU\/Z0``@2!ZLO_^XU\)"D +MVO"C[_`2!X#_PY"DV^"?_I"DVN"5\)"DW/"CSO"0``82!ZO]K/`EX/_L,_[O +M+?WN//R0``02!ZLEX/_E\#/^D``"$@>K+__N-?#/+?WO//RK5I``"!('J_NJ +M\*X">`+#,\XSSMCY+?_L/I"DWO"C[_#E4=.4`5!(Y5O#$_[E7!/_P^N?ZIY0 +M")"5$70!\(`PJU:J5ZE8D``($@>K^ZKPY5RN6W@#SL,3SA/8^?]\`'T%$@<# +MT^N?ZIY`!>20E1'P=?`0Y5&0@0`22(G@D*37\%1_]5)U\`7E49"7DQ)(B>"0 +MI.#P=)0E4?6"Y#2>]8/@PY0%0`+!ZY"DX.#_Y5*?0`V/4I"DU^!4@/[P[T[P +MY5*00>V3_W03)5'U@N0TG/6#X,.?Y5)`!9!!18`#D$&9D_5=D*/^X&!]Y5)D +M$V`%Y5*T"P60I`"`(^529!)@!>52M`H%D*0!@!/E4F018`7E4K0)!9"D`H`# +MD*/_X/5>Y5[#E(!0*.5>E!M``H`3Y5TE7O_D,_[3[Y0;[F2`E(!`!75=&X`@ +MY5XE7?5=@!C#Y)5>]5[E7=.57D`(Y5V57O5=@`/D]5WE777P!J0DH_ET0#7P +M=5/_]52)59"DU^"01)F3_].0I-G@GY"DV."4`$`"P>20!*GE7/##E`KE6Y0` +M4&:0!*C@!/"K5JI7J5B0``82!ZO_KO"0``@2!ZLO_>7P/OSE6\,3_N5<$__3 +M[9_LGD`$?0+!YN5`TY_E\)Y0`L'KP<7E427@)!+U@N0TEO6#X/59H^#U6M/E7)3HY5N4`T`( +MD*3B=`7P@!C3Y5R4&>5;E`!`")"DXG0"\(`%Y)"DXO#D]5^K5JI7J5AU\`+E +M7Z3U@H7P@Q('J_^N\)"DXN#][Z@%"(`%SL,3SA/8^?^K4ZI4J56%7X)U@P`2 +M!J+]?``2!P/O)5KU6NXU6?59!5_E7[0%L*M3JE2I59``!1(&HOU\`)"DXN#_ +MY5RN6Z@'"(`%SL,3SA/8^?\2!P/3Y5J?Y5F>0`SE6I_U6N59GO59@`7D]5GU +M6N51)>`D$O6"Y#26]8/E6?"CY5KPKEG_Y/S]=?`$Y5*00D$22(D22'G#$DA` +M0`+!RW02)5'U@N0TE?6#X/]T$R51]8+D-)SU@^#^TY]``^Z`&G03)5'U@N0T +MG/6#X/]T$B51]8+D-)7U@^##GY"DX?"0I.'@TY0$0`1TE(`A=)0E4?6"Y#2< +M]8/@TY0!=)1`#B51]8+D-)SU@^`4\(`+)5'U@N0TG/6#Y/!U\`3E4I!"01)( +MB1)(7>51)>`D$O6"Y#26]8/N\*/O\'24)5'U@N0TG/6#X'`FKU$138`@Y5(E +MX"21]8+D-$/U@]-T`9.56N23E5E`!WT!KU$2@$'D_:]1T?T%4>51PY2`4`(A +MUB+3$*\!P\#0[6!B=?`*[Y"-`1)(B>3PH_!U\`KOD(T#$DB)Y/"C\'7P"N^0 +MC0422(GD\*/P=?`*[Y"-!Q)(B>3PH_!U\`KOD(T)$DB)Y/"C\.\EX"0!]8+D +M-)+U@^3PH_!TDR_U@N0TFO6#Y/!U\!#OD($#$DB)X%2_1(#^=?`0[Y"!`Q)( +MB>[PT-"2KR)T@2_U@N0TE/6#X%3PQ%0/_G7P$.^0@0422(G@5`/]=?`%[Y"7 +MDQ)(B>#\=!0O]8+D-*#U@^!4?_5GTY0;4&'E9\.4#$!:#NZ4!$`TY/YT$R_U +M@N0TG/6#X/MT$B_U@N0TE?6#Z_!T$R_U@N0TG/6#X"0\^W24+_6"Y#2<]8/K +M\'2!+_6"Y#24]8/@5`_[[L14\$O^=($O]8+D-)3U@^[PY6?3G$`"C&>0I9[M +M\.3[K6<28FVO9R*/8'24+_6"Y#2<]8/D\'7P!>^0EY<22(G@5`'_=?`0Y6"0 +M@0`22(G@]6%4?Y"DY_!U\`7E8)"7E!)(B>"0I.KP=?`%Y6"0EY,22(G@^9"D +MZ_!U\!#E8)"!!1)(B>!4`_5BD*3GX/XEX"21]8+D-$/U@^23^G0!D_OE8"7@ +M)!+U@N0TEO6#ZO"CZ_!T%"5@]8+D-*#U@^[P=!0E8/6"Y#2@]8/@]6.0I.?@ +MTYE`"I"DZ^"0I.?P]6'M<`)!$)"DZ.WPY6$PYPJ0I.?@]6&CX!3PD*3HX/YP +M`D$0D*3JX/V0I.?@TYU0`D$*Y)"DYO#E8].4&U!9Y6.4#$!3[V0!<$[E8R3T +MPYZ0I.GP_:]@4;*K8JUCKV!1'H]A=($E8/6"Y#24]8/@5`3_OPP*D*6>Y6+P +M>P%!&'2!)6#U@N0TE/6#X##B"I"EGN5B\.3[01B0I.?@%)"DY?"0I.K@_Y"D +MY>#]PY]`:^#_$Q,35!_^=?`(Y6"0B0`22(GE@B[U@N0U@_6#X/MZ`.]4!_]T +M`7X`J`<(@`7#,\XSSMCY_^Y:_N];3F`?C6&0I.;@!/"0I.C@_Y"DYN!O8!F0 +MI.K@_^5ATY]`#I"DY>`4\("-D*3JX/5AD*6>Y6+PY/L28FFO82*0I.SK\'2! +M+_6"Y#24]8/@5/OP=!0O]8+D-)WU@^#[8"]T%"_U@N0TGO6#X&`BK@.L!G2! +M+_6"Y#24]8/@5/M$!/UT@2_U@N0TE/6#[?"`0704+_6"Y#2=]8/@`D$?6"Y#1%]8/DD_]T%"SU@N0TG?6#[_!T%"SU@N0TGO6#Y/!AU(!'[F0! 
+M<&GM8!AD`6`4[60#8`_M9`1@"NUD"6`%[60*<"?M)>`D$O6"Y#1%]8/DD_]T +M%"SU@N0TG?6#[_#M)>`D$?6"Y#1%@$SM)>`D$?6"Y#1%]8/DD_]T%"SU@N0T +MG?6#[_#M)>`D$O6"Y#1%@"7M)>`D$?6"Y#1%]8/DD_]T%"SU@N0TG?6#[_#M +M)>`D$O6"Y#1%]8/DD_]T%"SU@N0TGO6#[_!T%"SU@N0TG?6#X/ET%"SU@N0T +MGO6#X/WI$Q,35!__=?`([)")`!)(B>6"+_6"Y#6#]8/@^WH`Z50'_W0!?@"H +M!PB`!<,SSC/.V/G_[EK^[UM.<`IT%"SU@N0TG8!$[1,3$U0?_W7P".R0B0`2 +M2(GE@B_U@N0U@_6#X/MZ`.U4!_]T`7X`J`<(@`7#,\XSSMCY_^Y:_N];3G`, +M=!0L]8+D-)[U@^3P(H]2=?`%[Y"7EA)(B>`3$U0#_W03)5+U@N0TG/6#X/[3 +ME#)`+'7P!>52D)>3$DB)X/54=?`%Y5*0EY422(G@_,035`<@X`*A1N541(#U +M4X![[M.4'D`O[1)(IX3Q`(3V`83Q`H3V`X3Q!(3V!83[!H3[!P``A49U5!>` +M3754$(!(=50(@$-T$R52]8+D-)SU@^#3E`I`+^T22*>%,0"%-@&%,0*%-@.% +M,02%-@6%.P:%.P<``(5&=504@`UU5`R`"'54!(`#Y/54A513D`2EY5/PD*6> +M[_#D^ZU3KU(28FUTDB52]8+D-)7U@^"U4Q!TE"52]8+D-)_U@^`$\(`;=)(E +M4O6"Y#25]8/E4_!TE"52]8+D-)_U@^3P=)0E4O6"Y#2?]8/@M`(;=!,E4O6" +MY#2:]8-T"O!TE"52]8+D-)_U@^3P(I"DVQ)(GA(&B?51))/U@N0TF_6#X%2< +M\'23)5'U@N0TF_6#P(/`@N#_D*3;$DB5D``#$@:B5`'^[T[0@M"#\'23)5'U +M@N0TF_6#P(/`@N#_D*3;$DB5D``#$@:B5`+^[T[0@M"#\'23)5'U@N0TF_6# +MP(/`@N#_D*3;$DB5D``#$@:B5"#^[T[0@M"#\'23)5'U@N0TF_6#P(/`@N#_ +MD*3;$DB5D``#$@:B5$#^[T[0@M"#\'23)5'U@N0TF_6#X/]4(,035`?^=?`% +MY5&0EY422(G@5!_]D`2D[_#E4<.4@%`6D``"$@:B_W03)5'U@N0TG/6#[_"` +M#^51M(`*D``"$@:BD)<2\.Y@!*]1D7LBTQ"O`5$DB) +MX%0?_R3W4`*`6^3U:)"E]N#]=?`(D(D`$DB)Y8(E:/6"Y#6#]8/@_N]U\`>D +M)%;U@N0T0/6#Y8(E:/6"Y#6#]8/DD_SN7/YU\`CMD(D`$DB)Y8(E:/6"Y#6# +M]8/N\`5HY6BT"*CD_75I!N5IM`8=_Y"E]N!U\`B0B0`22(GE@B_U@N0U@_6# +MX%0/@!F0I?;@=?`(D(D`$DB)Y8(E:?6"Y#6#]8/@D*7W\)"E]^!@,75H!W0! +M?@"H:`B`!<,SSC/.V/G_D*7WX/OO6V`+Y6EU\`BD)6C]@!@5:.5HPY0`4-+E +M:6`+%6GE:<.4`$`"X4[D_/5IY6FT!AW_D*7VX'7P")")`!)(B>6"+_6"Y#6# +M]8/@5`^`&9"E]N!U\`B0B0`22(GE@B5I]8+D-8/U@^"0I??PD*7WX&`OY/5H +M=`%^`*AH"(`%PS/.,\[8^?^0I??@^^];8`OE:77P"*0E:/R`#P5HY6BT"-0% +M:>5I9`=PA)"E]N#_=?`%D)>3$DB)[?!U\`7OD)>4$DB)[/!U\!#OD($`$DB) +MX/]4?_5J[U2`]6OE:M.=0`?E:TWU:H`+Y6K#G%`%Y6M,]6J0I?;@_R04]8+D +M-)_U@^5J\'7P!>^0EY822(G@$Q-4`Y"EGO"0I?;@_^3[K6H28FV0I?;@=?`0 +MD($#$DB)Y/#0T)*O(G2!+_6"Y#24]8/@5/SP=)0O]8+D-*#U@^#^8#=T%"_U +M@N0TH?6#X&`J[GH`^Y"DYNKPH^OP=($O]8+D-)3U@^!4_$0"_G2!+_6"Y#24 +M]8/N\(!(=)0O]8+D-*#U@^!P#W04+_6"Y#2A]8/@8`*`''24+_6"Y#2@]8/@ +M_F`;=!0O]8+D-*'U@^!P#NYZ`/N0I.;J\*/K\(`#KP4BD*3FH^#_(JP'=),L +M]8+D-)OU@^#_5`+#$_[O5$#$$Q-4`V0!<$WMPY0`0`;MTY0#0`SMPY0(0"_M +MTY0*4"GM)>`D]/6"Y#1$]8/DD_]TE"SU@N0TH/6#[_!T%"SU@N0TH?6#Y/!! +MC>UD!6`B[60+<$2`&^YD`7!D[<.4`$`&[=.4`T`*[60(8`7M9`EP)^TEX"3T +M]8+D-$3U@^23_W24+/6"Y#2@]8/O\.TEX"3S]8+D-$2`3.TEX"3S]8+D-$3U +M@^23_W24+/6"Y#2@]8/O\.TEX"3T]8+D-$2`)>TEX"3S]8+D-$3U@^23_W24 +M+/6"Y#2@]8/O\.TEX"3T]8+D-$3U@^23_W04+/6"Y#2A]8/O\'24+/6"Y#2@ +M]8/@^704+/6"Y#2A]8/@_>D3$Q-4'_]U\`CLD(D`$DB)Y8(O]8+D-8/U@^#[ +M>@#I5`?_=`%^`*@'"(`%PS/.,\[8^?_N6O[O6TYP"G24+/6"Y#2@@$3M$Q,3 +M5!__=?`([)")`!)(B>6"+_6"Y#6#]8/@^WH`[50'_W0!?@"H!PB`!<,SSC/. 
+MV/G_[EK^[UM.<`QT%"SU@N0TH?6#Y/`B$@:)5'__D``!$@:B_E0?_>Y4@,03 +M$Q-4`9"DV_"0``(2!J+^5`.0I-SP[E0PQ%0/D*3>\)```A(&HOY40,03$U0# +MD*3=\.Y4@,03$Q-4`?Z0``(2!J)4"/P3$Q-4'Y"DW_#N5`'$,S,S5(#^=?`% +M[Y"7EA)(B>!4?T[PD*3=X%0!Q#,S5,#^=?`%[Y"7EA)(B>!4OT[PD*3?X&`" +M@9GM8`AD`F`$[;0$$'7P!>^0EY<22(G@1`'P@`YU\`7OD)>7$DB)X%3^\.U4 +M'_YU\`7OD)>5$DB)X%3@3O"0I-S@5`/^=?`%[Y"7EA)(B>!4_$[P[B7@)>#^ +M=?`%[Y"7EA)(B>!4\T[PD*3;X%0!Q#-4X/YU\`7OD)>5$DB)X%3?3O"0I-[@ +M5`/$5/#^=?`%[Y"7EA)(B>!4ST[PY/[N]8)U@P"CHZ,2!J+]=?`([Y")`!)( +MB>6"+O6"Y#6#]8/M\`[NM`38$H;/(I"DV!)(GI"DU^_P$DBGC0,`C0P!C14" +MC1T0C241C2X2C384C3\@C4@AC5$CC5DDC6(EC6M`C7Q!C7-"C85@C8YAC9=B +MC:!CC:EDC;)EC;IFC<-GC=LC?!MC?EN``".`I"DV!)(E0*A +M39"DV!)(E0*AF9"DV!)(E<$ED*38$DB5X;&0I-@22)4"<]Z0I-@22)7!$I"D +MV!)(E0*GTY"DV!)(E0*G_I"DV!)(E0*J#Y"DV!)(E>&7D*38$DB5`J/3D*38 +M$DB5`JI5D*38$DB58320I-@22)4"A;^0I-@22)4"MQZ0I-@22)4":I&0I-@2 +M2)4"6O&0I-@22)4"7[Z0I-@22)4"6C>0I-@22)4"JI.0I-@22)7AGY"DV!)( +ME0*3"Y"DV!)(E0*1B9"DV!)(E0*2HI"DV!)(E0*3Q)"DV!)(E0*J]9"DV!)( +ME0*L&9"DV!)(E0*L0Y"DV!)(E0*LMY`!P.!$`?"0I-?@D`'"\"(2!HG_D**, +M\+\!!]'3Y)"BC/`BTQ"O`JU&J4JE3D``!$@:B9`%@$9"C#N`@X`?D_Q*ELX`#$F;4T-"2KR+3$*\! +MP\#0D*'QX/^0H?#@M0<$?P&``G\`[W!#D*'PX/YU\`B0H:`22(G@_>YU\`BD +M)*'Y=*$U\/I[`:\%D9J0H?#@!/#@?P"T"@)_`>]@!>20H?#P$JSHD*&4X$0" +M\-#0DJ\B>P%ZI'G;?_5^`1(TO+\!!I"DV^"C\'L!>J1YVW_V?@$2-+R_`0B0 +MI-O@D*3=\'L!>J1YVW_T?@$2-+R_`0B0I-O@D*3>\'L!>J1YVW_S?@$2-+R_ +M`0B0I-O@D*3?\'L!>J1YVW_R?@$2-+R_`0B0I-O@D*3@\)"DW.#_H^#]H^#[ +MH^"0I.3PD*3@X)"DY?"0I.9T$O"0I/1T!?"0I.CO\*/M\*/K\)"DY."0I.OP +MD*3EX)"D[/![`7JD>>829]Y_!`)?<1(&B9"C(?`B$@:)D*/\\)```1(&HI"C +M_?`BD`0DX/51$@:))5&0I"OPD``!$@:B)5&0I#GPD``"$@:B)5&0I$?P(I"C +M[>#^PQ,PX!SOM`$%D*/T@`.0H_`22%&0JKD2"&U_@'X($CBD(I"E%70(\)"E +M(W0!\)"E%^_P>P%ZI7D5`F?>D*5<=`GPD*5J=`?PD*5>[_!P,9"CUN!@&J/@ +M8`*`#)`'<.!P!I`'=.!@")"E7W0!\(`%Y)"E7_#DD*5@\*/PH_"C@#F0_6+@ +MD*5?\)#]8^"0I6#PD/UDX)"E8?"0_67@D*5B\)#]9N"0I6/PD/UGX)"E9/"0 +MI5_@5`&0H];PH_![`7JE>5P"9]Z0H^/@_\,3,.`$[U3]\)"CX^#_$Q,35!\P +MX!#O5/?PD/U8X##@!>3_$D[GD*/CX/_$$U0',.`$[U3?\)"CX^#_Q!,3$U0! +M,.`K[U1_\)#]6.`@X`B0I05T`?"`!>20I07PD*4%X/V0H^C@^^3_44-_!!). +M[)"CY.#_PQ,PX`3O5/WPD/U8X"#@7)"CX^`PX`A[`7JD>09A"Y"CX^#_Q%0/ +M,.`)>P%ZHWGF`EHWD*/CX/_$$Q-4`S#@"'L!>J-YZ(`ED*/CX/\3$U0_,.`) +M>P%ZHWGE`EKQD*/DX##@"'L!>J-YYU&B(M,0KP'#P-"0I8D22)X2!HF0I8SP +MD``!$@:BD*6-\)```A(&HI"ECO"0``,2!J*0I8_PD``$$@:BD*60\.2C\*/P +MD*/CX$1`\'L!>J5YC'T'?S`26-^/<>5QM`$?D*/CX%2_\$2`\.20I:7PHW0* +M\.3[_7]H?@$26K^`.N5QM`(:D*6)$DB5BT"*08E"=4,%>P%ZHWGH$C6%@!OE +M<;0$%I"CX^!4O_"0I8S@^^3]_U%#?P027W'0T)*O(I"E:W0+\)"E>70'\)"E +M;>_P8#*0_6/@D*5N\)#]8>"0I6_PD/UDX)"EP%ZI7EK`F?>TQ"O`D*6^$DB5$@:)D*0&\)```1(&HI"D!_"0``(2!J*0I`CP +MD*/CX$0!\)"EOA)(E1)8VY"EP>_POP$=D*/CX%3^\$0"\.20I:7PHW0*\.3[ +M_7]H?@$"6K^0I<'@M`0'D*/CX%3^\"*0I-MT"O"0I.ET!O`2!HF0I-WPD``! 
+M$@:BD*3>\)```A(&HI"DW_"0``,2!J*0I.#PD``$$@:BD*3A\)``!1(&HI"D +MXO![`7JD>=L"9]X2!HGU49```1(&HO54D``"$@:B]560``,2!J+U5I``!!(& +MHO57D``%$@:B]5B0``82!J+U6>51$DBGE!<`E!\!E"<"E"\#E#<$E#\%E$<& +M``"47G52`G53*8!%=5(&=5,J@#UU4@%U4S&`-752`753,H`M=5(&=5,S@"5[ +M`'H`>51A=I"C_N54\*/E5?"CY5;PH^57\*/E6/`B=5(!=5/_>P!Z`'E4K5*O +M4P)8W],0KP'#P-"0I7IT%?"0I8AT`?"0I7SO\'L!>J5Y>A)GWM#0DJ\BD*&> +MX/]["'T!L8B0I0GN\/RC[_#]D*4&X/^CX/NCX)"E$/"0I0WL\*/M\*/K\)"E +M#>#\H^#]$E;7D*4-H^#__20-]8+D-/SU@^!$@/!T#2WU@N0T_/6#X%3O\'02 +M+_6"Y#3\]8/@1`+P=!(O]8+D-/SU@^!4`_"0I0_@_Y"E#:/@_B0J]8+D-/SU +M@^_PD*40X/]T*R[U@N0T_/6#[_!T+"[U@N0T_/6#X"0"\"*0I0;O\*/M\*/K +M\)`$'>!@()`%(N"0I0OP?0$25^[O9`%P`I&7D*4+X/]]`A)3NH`"D9>0!!]T +M(/`BD*6GH^#_>PA]`=,0KP'#P-"0I>[M\*/K\)"E[>_PY/W\\99\`*T'D*7M +MX)`$)?"0I>[@8`YT(2_U@N0T_/6#X$2`\*\%="`O]8+D-/SU@^!4P/!T(2_U +M@N0T_/6#X%3`\*\%=!(O]8+D-/SU@^!4`?Z0I>_@)>`EX/ON1`)+_G02+_6" +MY#3\]8/N\'01+_6"Y#3\]8-T__!T*2_U@N0T_/6#X%3W\*X$KP70T)*O(M,0 +MKP'#P-"0I0CN\*/O\)`$'>!@'I`%(N"0I0SP?3825^Z_`0+1>9"E#.#_?3<2 +M4[J``M%YD`4BX%1O_WTX$E.ZD`0?="#PT-"2KR*0H9_@_^3[?0&QB)"E"N[P +MH^_PD*4(X/RCX/VK!Y"E#>WP[/G@_ZX#="HN]8+D-/SU@^_P="LN]8+D-/SU +M@^GP(M,0KP'#P-"0I=KO\)`$'>!@,Y`%(N"0I=WP?2D25^Z_`1>0H9VQ@I"E +MV^[P_*/O\/V0I=K@_Q)6UY"EW>#_?2H24[J`%Y"AG;&"D*7;[O#\H^_P_9"E +MVN#_$E;7D`0?="#PT-"2KR*0I07O\*/M\*/K\)"D">`3$Q-4'R#@#I"E!N"T +M`0=]-G]O$E.ZD*4%X'`,D*4'X/]]!1)3T(`FD*4%X+0!"9"E!^#_T;J`%I"E +M!>"T`@^CX+0!"I"D%^#^H^#_T2V0I`G@$Q,35!\@X`N0I0;@<`7]_Q)3NB*0 +M_1#O\'\`(I"CN^#$5`\@X!^0!!W@!D`7`XD`:2X"#B +M!I`$X^!@()`&DG0$\)"CTN`$\)"CR>!U\`.$_Y"CTN"U!P*``D%8Y)"CQ?"0 +MH]`$\"*0H\7@9`1P-9`&DN`@X@:0!./@8!R0!I)T!/"0H]+@!/"0H\C@_Y"C +MTN"U!P*``D%8Y)"CQ?"0H]!T!/`BD*/%X&0&8`(AN9"CT^#_D*/2X"__Y#/^ +M?`!]`Q('`Y"CR>`O_^P^_L/OE$'N9("4@%`(D*/3X)0#0!&0H\#@Q%0/D*/0 +M(.`"(5M!`9"CQ.#_$Q-4/S#@>N]4^_#DH_"0H\#@Q%0/,.`.D*/BX##@`B'G +MD*/000&0H]/@_Y"CTN`O_^0S_GP`?0,2!P.0H\G@+__L/O[#[Y1![F2`E(!` +M#I"CXN`PX`(AYY"CT$$!D*/BX##@%I"CQ70)\)`&DG0$\.20H]+PD*7?00V0 +MH]!T`O`B$E/%D*/3X`3P?P,27\B0H]/@_Y"CTN`O_^0S_GP`?0,2!P.0H\G@ +M+__L/O[#[Y1![F2`E(!0"I"CT^"4`U`"07.0!WAT`_"0!2+@1!#_?0,24[J0 +M!)S@!/`BD*/%X&0'<$^0H]/@M`0%D*/0@#20H\3@_Q,35#\PX"SO5/OPY*/P +MD*/BX##@%Y"CQ70)\)`&DG0$\.20H]+PD*7?!(`/D*/0=`7P(A)3Q9"CT^`$ +M\(!(D*/%X&0)<%N0H\3@,.`.D*/0=`7PD*/$X%3^\"*0!I+@,.(I=`3PD*/2 +MX`3PX+0"%Y"EW^"0H]!@!70%\(`#=`+PY)"CQ?`B?P,"7\B0I=_@D*/08`5T +M!?"``W0"\.20H\7P(I"CO>`PX#41`I"CT.#_M`$"@!R0H]#@_[0"`H`@`_LM`0/D*._X/^0H\W@ +MPY^0H]3PD*.]X,035`#$5`\PX%20H\/@5-_PY/U_!!)0O9"CP>#$ +M$Q-4`S#@.I"CQ.!$`O!4^_#DD*/3\)"CT/"0I07@_[0!")"CQ70&\(`*[[0$ +M!I"CQ70'\)"CO.!@!Y"CQ.!$!/"0I07@M`$$?0:`"9"E!>"T!`=]#']O$E.Z +MD*/!X!,3$U0?,.`;D*/4X/_#E"!0"N]_`"7@)>#^@`1__WY_$I8MD*/`X##@ +M!N3]_Q)3NB+3$*\!P\#0D*6R[_"C=`+PY/\2;XF0H\'@_Q,35#\PX`,2;C:0 +MH\'@_S#@")`'>'0!\(!"D*.]X/[$$Q-4`S#@")`'>'0-\(`MD*/`X/[$5`\P +MX`WN$Q-4/Y`'>##@#X`2D*/`X/[#$Y`'>##@!70#\(`#=`GPD*6RX&0#8`*A +M#._$$Q-4`S#@<9"CQ^#_D*/2X/[3GT!#[G7P`Z3_D*/)X/[#[Y[_)`/]Y#/\ +MD*._X/[3G>QD@/AT@)A`".Z?D*6U\(`&D*6U=`/PD*6UX/\27\B0H]#@!/"` +M&9"CRN#_$E_(D*/%=`3PY)"CT/"0!I)T!/#DD*/2\(`.D*._X/\27\B0H]#@ +M!/"0H[W@Q!,3$U0!,.`'Y)"EM/"`!I"EM'0!\)"CP.#$$U0'(.`3D*/\X&`' +MY)"EL_"`!I"ELW0!\)"ELQ)IEI"CSW0!\)"CO>#$$U0',.`-D*6RX'!"_?\2 +M4[J`.Y"CO>#$5`\PX!V0H\/@1"#PD*.\X&`$?0&`&^3]_Q)3NGT!?PR`$9"E +MLN"T`PV0HQ?@8`?D_7\$$E"]D*.\X&`8D*6RX'`$?02`"I"ELN!D`W`T?0M_ +M;X`KD*6RX'`$_?^`(9"ELN"T`QV0H[W@_\035`<@X`OO$Q-4/S#@`Q*7GN3] +M_Q)3NI"CP.#$$Q,35`$PX`5_`1*/V9"CP>##$Y`&S3#@#>!$$/"0!L_@1!#P +M@`O@5._PD`;/X%3O\-#0DJ\B?@!_,'T`>P%ZHWF]$@BJD*.^=`OPHW0(\)"A +MF>#\9`)P'9#]@.!^`##B`GX![E0!Q#,S5,#^D*/`X%2_3O`B[&0!<`V0_7#@ 
+M?P`PX@)_`8`3D*&9X&0#]4`<0S,U3`_Y"CP.!4OT_P +M(N20I/7PD*.]X##@;)"CP>#$$Q-4`S#@'Y"CR>#_$E_(D`:2=`3PD*/%=`'P +MY)"CTO"0H]#P@!&0I/7@_Y"COA)?Q)"CT'0!\)"CSW0!\)"CO.!@!WT%?V\" +M4[KD_?\24[J0H[W@_\035`<@X`OO$Q-4/S#@`Q*7GB*0H]C@,.`%$FE)@`+1 +MH)"CP>#_Q!,35`,PX!.0I`7@!/#@M!0)D`2#][7@"SL,3SA/8^?^0HTCN +M\*/O\)"C%.`3$Q-4'S#@$Y`!.^`PY`P2HJ20HQW@%)`%<_"0I?KD=?`!$@C6 +MPY"E^^"4@)"E^N!D@)2`0`N0`9C@5/[PX$0!\'\!`D[LD*46[_!_+'X)$C>M +M[U0$_^3]_.]@")"CWG0!\(`%Y)"CWO!_,'X)$C>M[E0!_NU4$/WD_.U.8`?D +MD*/?\(`&D*/?=`'PD*/8X$0!\'T1$DM$D`=XX)"CW?"0H_S@_^3]$FF;D*46 +MX/UP`H`D[;0!"I"CV.!4'T0@\"*0I1;@_;0""I"CV.!4'T1@\"+MM`,'D*/8 +MX%0?\"*0H\#@Q!,35`,@X!Z0_6+@,.`7X)"E%3#A!70!\(`#=`+PD*45X/\2 +MG\XB[W`0 +M!W@$\(`?O@$(D`=X=`/P@!2^`@B0!WAT"?"`";X#!I`'>'0-\)"D">#^Q!-4 +M!S#@1>]P'Z/@$Q-4/S#@!^20I07P@`:0I05T`?"0I07@_>3_@""0I`K@Q!,3 +M5`,PX`?DD*4%\(`&D*4%=`'PD*4%X/U_`1)IFR*0I@CO\'\"$D>7D*&5X/^0 +MI@C@_N].D*&5\"*0`@G@]5$2!HDE49"AFO"0``$2!J(E49"AF_"0``(2!J(E +M49"AG/"0``,2!J(E49"AG?"0``02!J(E49"AGO"0``42!J(E49"AG_`BBU&* +M4HE3D``!$@:B__55$@:)_L,3,.`*D``"$@:B]5:``H]6A554Y533E590,ZM1 +MJE*I4Q(&B50!_W2-)53U@N0THO6#[_!TC254]8+D-*+U@^"O5'`$42N``E$: +M!52`QA)W>N55#_H^!O<`L2=[U1 +MI)"C'N`4\)`!YN`$\"*0HHW@9`%@`F'"D*,7X'`"8<*0HQ7@_\14#V0!<":0 +M!JO@D*,>\)`&JN`$D*,=\*/@_W`(D*,=X/[_@`/O!/^0HQ[O\)"C$^`PX`,2 +MLN20HQ3@1`3PY)"C(/"0HR*CX)`%6/"0`5?D\)`!/'0"\)"C&^!4_?!4[_"0 +MHQ7@_\14#R3]4`*`#Y"C#N`PX`42M\^``Q*Z?9"C%.`3$Q-4'S#@#Y"C'>#_ +MH^"U!P42=[U1JI"C#N##$R#@!Y"C%.!$!/`BTQ"O`]4^TW_D*,.\.Y4 +M"/[O5/=.__`2!HG^5!#][U3O3?^0HP[P[E0@_N]4WT[_\!(&B?Y40/WO5+]- +MD*,.\.[#$R#@`H'MX"#@`H'5=50A$Q-4/S#@"!)FBT-4"(`,Y)"C#_"C\'U` +M_U&ND*,.X/\3$Q-4'S#@`T-4$N_$5`\PX`-#5!20HP[@Q!-4!S#@`T-4@)"C +M#N#$$Q-4`R#@`T-40)`%)^54\)"C$>!P!'\!L;.0HP[@_\03$U0#,.`$?P2` +M(A)FR.]@!'\!@!A_`H`4=50!D`4GY53PD*,1X&0$8`*AKO^QLZ&ND*,.X/\@ +MX`*A>T-4,1,35#\PX`@29HM#5`B`!GU`Y/]1KI"C#N#_$Q,35!\PX`-#5`+O +MQ%0/,.`#0U0$D`4GY53PD*,.X/_$$Q-4`S#@#I"C$N!D`F!JY/U_`H`BD`4G +MX$1`\)"C$N"T`AGQN!)FR+\!"9"C&>#_?0&``^3]_Q)0O8`]D*,:X)"C$O"` +M,W54`9`%)^54\)"C$N"T`@9]`7\$@`N0HQ+@M`@'?0%_#!)0O?%!D*,9X/]] +M`1)0O1)WF]#0DJ\BTQ"O`"0I@KP;W`"P;SO%&!"%&!L%'`"P6<4 +M<`+!DR0$8`+!O)"F"N"T!`31[\&\D*8*X+0"!-'^P;R0I@K@M`,$\0+!O)"F +M"N!D`6`"P;S1\<&\D*8*X+0$!/$CP;R0I@K@M`($\1/!O)"F"N"T`P3Q!L&\ +MD*8*X&`"P;S1Z,&\D*8*X+0$!/%*@'>0I@K@M`$%$D_J@&N0I@K@M`,$\3>` +M8)"F"N!P6A)/]8!5D*8*X+0$!/%=@$J0I@K@M`$$T=J`/Y"F"N"T`@42;^&` +M,Y"F"N!P+='8@"F0I@K@M`,$\7*`'I"F"N"T`031PX`3D*8*X+0"!/&'@`B0 +MI@K@<`+1P=#0DJ\BT>A]'W]O$E.ZD`4GX%2_\)"C$70$\"+1Z'TA?_\24[J0 +MHQ%T`_`BD*,1=`'P(O$CD`4GX%2_\.20HQ'P(O$3@._Q!H#KY/W_$E.ZD*,1 +M=`'P(A)N-N3]_Q)3NI"C$70!\"+D_?\24[J0!2?@1$#PD*,1=`'P(A)+29"C +M$70"\"+QG^]P`Q)7="*0!2?@1$#P?2,22T20HQ%T`O`B?2)__Q)3NI`%)^!$ +M0/"0HQ%T`_`B?25_;Q)3NI`%)^!4O_"0HQ%T!/`B$FXV?21_;Q)3NI`%)^!4 +MO_"0HQ%T!/`BD`0:X/1@`W\`(I`$&^!4!V0'?P%@`G\`(N3]_Q)3NGT$?P$2 +M4P"0!2?@1$#PD*,2=`3P(A(&B50!_Y"D4>!4_D_P,.`9D*&9X/^T`0>0HR%T +MW/`B[[0#!I"C(734\"*0I-L22)X2!HG_5'^0HQ?P[\03$Q-4`:/PD``!$@:B +M_U3PQ%0/_I"C%>!4\$[PD``#$@:B5`$EX/Z0HQ/@5/U.\.]4#\14\/^0HQ7@ +M5`]/\)```A(&HI"C%O"0``82!J(PX%[#$U0'_\.4!)"C*5`$[_"`+G0#\)"D +MVQ)(E>DD!OGD.OH2!HG_=`,D_?[OQ%0/_>]4#__M+E0/_L14\$\2!L^0I-L2 +M2)60``82!J+$5`__PY0$D*,?4`5T!/"``N_PD*3;$DB5D``$$@:B_7\"$E,` +MD*3;$DB5D``%$@:B_U0!_I"CN^!4_D[^\.]4`O_N5/U/__"0``42!J+^5`3] +M[U3[3?^0H[OP[E0(_N]4]T[_\)``!1(&HOY4$/WO5.]-_Y"CN_#N5"#^[U3? 
+M3O_PD``%$@:B5$#^[U2_3I"CN_#@_\03$U0#(.`I[\,3(.`+=5(!D*/\X&`+ +M@`[D]5*0H_S@8`7D]5&``W51`:U2KU$2:9N0I-L22)4QGY`!N70!\)`!N/"0 +MHQ?@D`&Z\)"C&>"0`;OPD*,5X%0/D`&^\"*0I-X22)XQSI"C%^#_$J)0D*,7 +MX&`8D*3>$DB5D``!$@:B5`__D``"$@:B_3'?(I"C$^!4^_#DD*,@\)"C&_`B +M[R3^8`L$<">0HQUT`O"`%NUP"I"CN>"0HQWP@`60HQWM\)"C'>"C\)"C%.!$ +M"/`BD``"$@:B_S#@)A(&B9"CMO"0``$2!J*0H[?P[U3^_Z/@5`%/\)```Q(& +MHI"CN?`BD*.V=`/PHW0%\*/@5`%$*/"C=`7P(A(&B9"CO/!@-*/@(.`OY/U_ +M!!)0O9"CN^#_PQ,PX![O$Q,35!\@X!60H[O@$Q-4/Y`'>##@!'0-\")T"?`B +MD*3;$DB>D*7GX'`3?X!^"!(WK9"C]!((;9"EYW0!\)"DVQ)(E1(&B?_DCU3U +M4_52]5&0H_022%'L5,'\P`3`!<`&P`>O5*Y3K5*L47@9$@A:T`/0`M`!T``2 +M2#.0H_`""&T2!HG_5`'^D*0)X%3^3O[P[U0&_^Y4^4__\!(&B?Y4"/WO5/=- +M_Y"D"?#N5!#^[U3O3O_P$@:)5"#^[U3?3I"D"?"0``$2!J+_5`/^D*0*X%3\ +M3O[P[U0$_^Y4^T__\)```1(&HOY4,/WO5,]-_Y"D"O#N5$#^[U2_3O"0``(2 +M!J*0I`OPD``#$@:BD*0,\)``!!(&HI"D#?"0I`O@_WX`?`%]0!('`^]X!<[# +M$\X3V/G_D*03[O"C[_"0I`S@_WX`?`%]0!('`^]X!<[#$\X3V/G_D*05[O"C +M[_"0I`W@_WX`?`%]0!('`Y"D%^[PH^_PD*0)X##@%Y"D#G0!\*/PH_#DH_"C +M\)`'@^!$(/`BY)"D#O"C\*/PH_"C\)`'@^!4W_`BD*3;$DB>D*3;$DB5$@:) +MD*09\)```1(&HI"D&O"0I-L22)5]`G\X`EC?$@:)5`$EX/^0H^+@5/U/\.## +M$_]4`9`!YO"CX%3^\.\PX$E_HQ)+(N]4^$0%_7^C$DO>?Z`22R+O5`]D!'`E +MD*0#X##@`H`DD/UBX+2M$Z/@M#4.D`'GX%3^\)`!Y73?\"*``)`!Y^!$`?`B +MD`'GX%3^\"(2!HE4`27@)>#_D*/BX%3[3_#@$Q-4/S#@")`'9>!$&/`BD*0# +MX"#@!Y`'9>!4Y_`BD`',X%0/D*7X\)"E^.#]<`+!-Y"A\.#_<`:CX&0)8`KO +M%/^0H?'@M0<$?P&``G\`[V`(D`'!X$0!\"*0I>C@_W0!?@"H!PB`!<,SSC/. +MV/G_[UUP`L$4Y)"E^?"0I?G@^<.4!%!SD*7HX'7P!*3_Z?U\`"__[#7P_G30 +M+_6"=`$^]8/@_Y"A\>!U\`B0H:`22(GE@BGU@N0U@_6#[_"0I>C@=?`$I"W_ +M[#7P_G3P+_6"=`$^]8/@_Y"A\>!U\`B0H:022(GE@BGU@N0U@_6#[_"0I?G@ +M!/"`@Y"E^.#_D*7HX/YT`:@&"(`"PS/8_/1?D*7X\)"EZ.#_=`&H!PB``L,S +MV/R0``$\.!_`+0*`G\![W`"@?+DD*'Q\('RD`'` +MX$0"\)"EZ.!$@)``BO"0I>C@=?`$D`'0$DB)X)`!P_`BTQ"O``Q7\!T-"2KR+3$*\!P\#0D*7.$DB>?Y9^ +M`M$X[V!8D`$7X/Z0`1;@?``D`/_L/O[O)`'_Y#[^D*71[_#N_Y#]$?"0I='@ +M_9`"E/"C[_"0I`D&/^0I#^!/"0``'N$@;A=``O^>0T^_I[`<`# +MP`+``9"ERQ)(E8M`BD&)0G5#`M`!T`+0`Q(UA9"ERN`D`OGD-/OZ>P'``\`" +MP`&C$DB5Z20"^>0ZBT#U08E"D*7+$DB5D``.$@:B]4/0`=`"T`,"-860I>H2 +M2)[D_Y"EZA)(E8^"=8,`$@:B_G3P+_6"Y#0"]8/N\`_OM!#@(I``\>!4\,14 +M#_\B=142Y/46=1<'=1ARD`$PY17PH^46\*/E%_"CY1CP(G4=#G4>`4,>"'4? +M`W4@8D,@@$,?!)`!..4=\*/E'O"CY1_PH^4@\"*0`93@1`'PD`''Y/`BD`$! +MX$0$\)`!FN!4P/!_"GX`$CZ'D`&9X$3`\)`!FW2`\"*0`9K@5,!$"_!_"GX` +M$CZ'D`&8X%3`?P"T0`)_`2+DD*34\*/P$8#O9`%@1<.0I-7@E(B0I-3@E!-` +M#Y`!P>!$$/"0`<=T_?"`)Y"DU.1U\`$2"-9_%'X`$CZ'TY"DU>"4,I"DU."4 +M`$"[D`'&X##CM)`!QW3^\")]`I`!Q'3X\'2PH_"0I!O@_^W#GU`8[27@)('X +MYC#D"Y`!N'0(\*/P?P`B#8#>?P$BY)"AE/"C\*/PH_"C\"*0`>1T$_"CY/`B +MD`$TX%45]1FCX%46]1JCX%47]1NCX%48]1R0`33E&?"CY1KPH^4;\*/E'/"0 +M`23@527U)O`BD`$\X%4=]2&CX%4>]2*CX%4?]2.CX%4@]220`3SE(?"CY2+P +MH^4C\*/E)/!3D=\BD`'/X)"D]?#@_S#@!Y`!S^!4_O#O,.4CD`'/X%3?\)`! 
+M-'0@\.3UJ/7H$D\=D``#X%3[_7\#$DO>@/XBD**-X&0!<""0HQ?@8!J0`5?D +M\)`!/'0"\.20I:7PD*.WX)"EIA):MR*0HHW@9`%P)I"C%^!@()`!5^3PD`$\ +M=`+PD*,3X%3[\)"C&^!4_?!4!W`#$E"D(I"BC>"T`120HQ?@8`Z0HQO@5/[P +M5`=P`Q)0I")QZ9"E!>_P,.`%?0'D@`+D_?\24P"0I07@,.81D`$OX##G!.3P +M@`:0`2]T@/"0HRC@_Z/@_9"C+>#[K`>0HQ/@,.`RD*,IX-.4`U`'D*,?Z_"` +M"NTD_2N0HQ_P?0.0HT[@)`3#G2S_D*,L\)"C(N3PH^_P@`Z0HR+D\*-T`O"0 +MHQ_K\)"C(J/@D`58\"+DD*3U\/VC\)`%8N#^D`5AX/OK>`+.PQ/.$]CY_Y"C +M2N[PH^_PH^#^H^#_D*-*X/JCX/O#G^J>0"CKG_^0HRS@_L-T"IXO_<.4&5`3 +M="\M]8+D-*/U@^`$\)"C*N`$\'&WD*,JX,.49$!HY)"D]O"0I/7PD*3UX/_# +ME!E01W0O+_6"Y#2C]8/@_Y"D]N`O\.#3E`5`)Y"D]>#_E`I`"N\D]I"C*?#D +M@`[DD*,I\)"D]>#_PW0*GY"C*/"`")"D]>`$\("OD*,IX/U[".3_49$2=WHB +MD*.VX/^0HR#@TY]`))"C+N`$\.#_E`10&)"C*._P)>`D")"C+?#[D*,HX/^C +MX/U1D2+DD*4&\*/PH_!_@Q)+(I"E!N_P?X,22R*N!Y"E!N#_M08!(L.0I0C@ +ME&20I0?@E`!`#9`!P.!$0/"0I0;@_R*0I0?D=?`!$@C6@+Z0HP[@_S#@/I"C +M$N!^`+0"`GX!D*,1X'T`M`0"?0'M3G`D[\,3,.`#`F;4D7N0HQ+@M`@&Y/U_ +M#(`)D*,2X'`&_7\$$E"](I"C#N#_Q!,35`,PX`^0HQ+@9`)@!WT!?P(24+V0 +MHQ+@9`)@`Q)G7"*0HP[@_S#@/Y"C$N!^`+0"`GX!D*,1X'T`M`0"?0'M3G`E +M[\,3,.`#`F;4D>N0HQ+@M`P&Y/U_"(`*D*,2X+0$!N3]_Q)0O2*0`5?@8$CD +M\)`!/'0"\)"C$^#_$Q-4/S#@#.]4^_"0HQO@5/WP(I"C(.`$\)"C&^!4[_"0 +MH[;@_Y"C(.#3GT`.D**-X+0!!Y"C%.!4^_`BD*,3X/_$$Q-4`S#@.N]4O_"0 +M!.#@D*,4,.`&X$0!\(`0X%3^\)`!N70!\)`!N'0$\)"CP^#_Q!-4!S#@!WT! +M?PP"4+T24*0BD*,3X/_$$Q,35`$PX"SO5'_PD`3@X)"C%##A!N!$`O"`#^!4 +M_?"0`;ET`?"0`;@$\)"C%^!@`Q)0I)"CP>#_Q!,35`,PX"*0H\3@_\,3,.`8 +M[U3]\)`$X."0H\0PX0;@1`3P@`3@5/OPD`3@X##A`K'U(I"D'.`PX#7#$U0' +M_W7P#I"D)Q)(B>#^,.`B=?`.[Y"D)Q)(B>Y4_O"0I!YT!?"0I!S@PQ-4!_U_ +M`1)<<2+3$*\!P\#0D`0=X&`:D`4BX%208`>0`<#@1`CPD`'&X##AY'\`@`)_ +M`=#0DJ\BD`'$=&#P=+:C\'^0$DLB[R#@]W1@!)`!Q/!TMJ/P(N20I?/PH_"0 +M!2+@D*7U\)`$+>!4`?"0!!W@8#S#D*7TX)30D*7SX)0'4"V0I$G@M/\-?1A_ +M_Q)3NN20I%#P(I`%(G3_\'\!?@`2/H>0I?/D=?`!$@C6@+Z0I$G@_WL8?0$2 +ME8AT%"_U@N0T_/6#X,035`/_D*1-X%3\3_"0I?7@5&__?1D24[J0!!]T(/"0 +MI$[D=?`!$@C6D*10=`'P(A(&B9"DV_#T8!S@D*1)\)```A(&HG7P"J3_D*1* +MY?#PH^_P`E^,D*3;X)"D2?#DH_"C\)`!7_`BD*,1X&0"?P%@`G\`(I"C$^`P +MX!B0HP[@_S#@#L,3,.`'\5:_`0:``H``\8,BD*,:X/]@`[0(#A*Z!+\!"/&< +MD`'EX`3P(M,0KP'#P-`23)\23[/0T)*O(JX'$F;(OP$6D*,.X,03$U0#(.`* +MKP9]`1)0O7\!(G\`(I`&J>"0I/7PX/U4P'`)D*,;X%3^\(!_[3#F3I"C%^!D +M`G`KD*,3X/_#$R#@"9"C&^!$`?"`*9"C%>!4#V0!<#"0HQO@1`3P?P$2EKJ` +M(I"C&^!$`?"0HQ7@5`]D`F`%$I>>@`P25U>`!Y"C&^!4_O"0I/7@D*,;,.<; +MX$0"\.20I:7PD*.WX)"EIA):MY"C$^!$!/`BX%3]\"*0I:_@_J/@_Y"!`.!4 +M#_VL!W0-+/6"Y#3\]8/@1`'P=`TL]8+D-/SU@^!4^_"L!W02+/6"Y#3\]8/@ +M1/KP=!$L]8+D-/SU@^!$'_"L!W0&+/6"Y#3\]8/@1`[PD`2GY/"0!*;PD`2E +M=/_PD`2D=/WP=!0L]8+D-/SU@^!4P$W]=!0O]8+D-/SU@^WP(I"C$^`3$Q-4 +M'S#@!9`!6^3PD`:2=`+PD`$\=`3PY)"EI?"0H[C@PQ-4?Y"EIO#D^_U_6'X! 
+M$EJ_D*,3X$0(\"*0H[W@,.`CD*//X&`(D`&X=$#P(?N0HQG@TY0`0`*`-9"C +MO.!P`B'S@&<2IY_O9`%@")`!N'0!\"'[D*,;X/]4`V`(D`&X=`+P@'N0HQG@ +M_N3#GE`(D`&X=`3P@&GO,.((D`&X=`CP@%V0HQO@,.0(D`&X=!#P@$Z0HQ3@ +M$Q-4/R#@")`!N'0@\(`[D*.\X&`(D`&X=(#P@"V0!F+@,.$(D`&X=!'P@!Z0 +M!F+@,.`/X%3\_[^`")`!N'02\(`(D`&XY/!_`2*0`;ET!/!_`"*0`H?@8`B0 +M`;AT`?"`)9`"EN!@")`!N'00\(`7D`*&X"#A")`!N'0$\(`(D`&XY/!_`2*0 +M`;ET"/!_`")]+1)7[I`!-W0"\/U_`Q)FE!)+2>3]?P$24P#DD*,2\")]+G]O +M$E.Z?0)_`1)3`)`%)^!4O_"0HQ)T`O`BY/5DD`:IX/5D5,!P#9"C&^!4_O!4 +M_?`"4*3E9##F(Y"C%^!D`7`BD*,;X$0!\)"C%>!4#V0"8`42EYZ`#!)75X`' +MD*,;X%3^\.5DD*,;,.<;X$0"\.20I:7PD*.WX)"EIA):MY"C$^!$!/`BX%3] +M\"+D]620HQ?@<`)AMI"BC>!D`6`"8;:0HQ/@,.`=D`5BX/Z0!6'@_>UX`L[# +M$\X3V/G_D*-,[O"C[_"0HQ7@_\14#V`B)/Y@`P1P'I"C'N`4\.#_8`:0HR#@ +M8`[O<`B0HQW@H_"``'5D`9"C#N`PX!*0HQ+@M`(#Y/5D$F;([W`"]63E9&!# +MD*,;X$00\)"C(.!@`[0!"^20I:7PD*,@X(`/Y)"EI?"0HR#@=?`#I"3^_Y"C +M'^`OD*6F$EJWD*,:X"#B`Q)0N1*7R"*0HQ/@_Q,35#\PX!'O5/OPD*,;X%3] +M\%0'<"Z`*9"C(.`$\)"C&^!4[_"0H[;@_Y"C(.#3GT`/D**-X+0!"Y"C%.!4 +M^_`B$E"D(GTO$DM$?0A_`1)3`)"C$G0(\"(2;C;D_?\24[H24OR0HQ)T#/`B +#`*FO +` +end Index: projects/clang400-import/sys/dev/ath/if_ath.c =================================================================== --- projects/clang400-import/sys/dev/ath/if_ath.c (revision 312719) +++ projects/clang400-import/sys/dev/ath/if_ath.c (revision 312720) @@ -1,6709 +1,6712 @@ /*- * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). * * It's also required for any AH_DEBUG checks in here, eg the * module dependencies. 
*/ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ATH_TX99_DIAG #include #endif #ifdef ATH_DEBUG_ALQ #include #endif /* * Only enable this if you're working on PS-POLL support. */ #define ATH_SW_PSQ /* * ATH_BCBUF determines the number of vap's that can transmit * beacons and also (currently) the number of vap's that can * have unique mac addresses/bssid. When staggering beacons * 4 is probably a good max as otherwise the beacons become * very closely spaced and there is limited time for cab q traffic * to go out. You can burst beacons instead but that is not good * for stations in power save and at some point you really want * another radio (and channel). * * The limit on the number of mac addresses is tied to our use of * the U/L bit and tracking addresses in a byte; it would be * worthwhile to allow more for applications like proxy sta. */ CTASSERT(ATH_BCBUF <= 8); static struct ieee80211vap *ath_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ath_vap_delete(struct ieee80211vap *); static int ath_init(struct ath_softc *); static void ath_stop(struct ath_softc *); static int ath_reset_vap(struct ieee80211vap *, u_long); static int ath_transmit(struct ieee80211com *, struct mbuf *); static int ath_media_change(struct ifnet *); static void ath_watchdog(void *); static void ath_parent(struct ieee80211com *); static void ath_fatal_proc(void *, int); static void ath_bmiss_vap(struct ieee80211vap *); static void ath_bmiss_proc(void *, int); static void ath_key_update_begin(struct ieee80211vap *); static void ath_key_update_end(struct ieee80211vap *); static void ath_update_mcast_hw(struct ath_softc *); static void ath_update_mcast(struct ieee80211com *); static void ath_update_promisc(struct ieee80211com *); static void ath_updateslot(struct ieee80211com *); static void ath_bstuck_proc(void *, int); static void ath_reset_proc(void *, int); static int ath_desc_alloc(struct ath_softc *); static void ath_desc_free(struct ath_softc *); static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); static void ath_node_cleanup(struct ieee80211_node *); static void ath_node_free(struct ieee80211_node *); static void ath_node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *); static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); static int ath_tx_setup(struct ath_softc *, int, int); static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); static void ath_tx_cleanup(struct ath_softc *); static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched); static void ath_tx_proc_q0(void *, int); static void ath_tx_proc_q0123(void *, int); static void ath_tx_proc(void 
*, int); static void ath_txq_sched_tasklet(void *, int); static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); static void ath_scan_start(struct ieee80211com *); static void ath_scan_end(struct ieee80211com *); static void ath_set_channel(struct ieee80211com *); #ifdef ATH_ENABLE_11N static void ath_update_chw(struct ieee80211com *); #endif /* ATH_ENABLE_11N */ static void ath_calibrate(void *); static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void ath_setup_stationkey(struct ieee80211_node *); static void ath_newassoc(struct ieee80211_node *, int); static int ath_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel []); static void ath_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel []); static int ath_getchannels(struct ath_softc *); static int ath_rate_setup(struct ath_softc *, u_int mode); static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); static void ath_announce(struct ath_softc *); static void ath_dfs_tasklet(void *, int); static void ath_node_powersave(struct ieee80211_node *, int); static int ath_node_set_tim(struct ieee80211_node *, int); static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *); #ifdef IEEE80211_SUPPORT_TDMA #include #endif SYSCTL_DECL(_hw_ath); /* XXX validate sysctl values */ static int ath_longcalinterval = 30; /* long cals every 30 secs */ SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 0, "long chip calibration interval (secs)"); static int ath_shortcalinterval = 100; /* short cals every 100 ms */ SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 0, "short chip calibration interval (msecs)"); static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 0, "reset chip calibration results (secs)"); static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 0, "ANI calibration (msecs)"); int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf, 0, "rx buffers allocated"); int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf, 0, "tx buffers allocated"); int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt, 0, "tx (mgmt) buffers allocated"); int ath_bstuck_threshold = 4; /* max missed beacons */ SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 0, "max missed beacon xmits before chip reset"); MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); void ath_legacy_attach_comp_func(struct ath_softc *sc) { /* * Special case certain configurations. Note the * CAB queue is handled by these specially so don't * include them when checking the txq setup mask. */ switch (sc->sc_txqsetup &~ (1<sc_cabq->axq_qnum)) { case 0x01: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); break; case 0x0f: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); break; default: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); break; } } /* * Set the target power mode. 
* * If this is called during a point in time where * the hardware is being programmed elsewhere, it will * simply store it away and update it when all current * uses of the hardware are completed. * * If the chip is going into network sleep or power off, then * we will wait until all uses of the chip are done before * going into network sleep or power off. * * If the chip is being programmed full-awake, then immediately * program it full-awake so we can actually stay awake rather than * the chip potentially going to sleep underneath us. */ void _ath_power_setpower(struct ath_softc *sc, int power_state, int selfgen, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d, target=%d, cur=%d\n", __func__, file, line, power_state, sc->sc_powersave_refcnt, sc->sc_target_powerstate, sc->sc_cur_powerstate); sc->sc_target_powerstate = power_state; /* * Don't program the chip into network sleep if the chip * is being programmed elsewhere. * * However, if the chip is being programmed /awake/, force * the chip awake so we stay awake. */ if ((sc->sc_powersave_refcnt == 0 || power_state == HAL_PM_AWAKE) && power_state != sc->sc_cur_powerstate) { sc->sc_cur_powerstate = power_state; ath_hal_setpower(sc->sc_ah, power_state); /* * If the NIC is force-awake, then set the * self-gen frame state appropriately. * * If the nic is in network sleep or full-sleep, * we let the above call leave the self-gen * state as "sleep". */ if (selfgen && sc->sc_cur_powerstate == HAL_PM_AWAKE && sc->sc_target_selfgen_state != HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, sc->sc_target_selfgen_state); } } } /* * Set the current self-generated frames state. * * This is separate from the target power mode. The chip may be * awake but the desired state is "sleep", so frames sent to the * destination have PWRMGT=1 in the 802.11 header. The NIC also * needs to know to set PWRMGT=1 in self-generated frames. */ void _ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", __func__, file, line, power_state, sc->sc_target_selfgen_state); sc->sc_target_selfgen_state = power_state; /* * If the NIC is force-awake, then set the power state. * Network-state and full-sleep will already transition it to * mark self-gen frames as sleeping - and we can't * guarantee the NIC is awake to program the self-gen frame * setting anyway. */ if (sc->sc_cur_powerstate == HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, power_state); } } /* * Set the hardware power mode and take a reference. * * This doesn't update the target power mode in the driver; * it just updates the hardware power state. * * XXX it should only ever force the hardware awake; it should * never be called to set it asleep. */ void _ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", __func__, file, line, power_state, sc->sc_powersave_refcnt); sc->sc_powersave_refcnt++; /* * Only do the power state change if we're not programming * it elsewhere. */ if (power_state != sc->sc_cur_powerstate) { ath_hal_setpower(sc->sc_ah, power_state); sc->sc_cur_powerstate = power_state; /* * Adjust the self-gen powerstate if appropriate.
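 *
 * (An illustrative sketch, not part of this change: callers of this
 * refcounted interface are expected to bracket hardware access via
 * the wrapper macros used elsewhere in this driver, e.g.
 *
 *	ATH_LOCK(sc);
 *	ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *	ATH_UNLOCK(sc);
 *	... program hardware registers ...
 *	ATH_LOCK(sc);
 *	ath_power_restore_power_state(sc);
 *	ATH_UNLOCK(sc);
 *
 * so the chip only drops back to the target power state once the
 * last reference is released.)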
*/ if (sc->sc_cur_powerstate == HAL_PM_AWAKE && sc->sc_target_selfgen_state != HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, sc->sc_target_selfgen_state); } } } /* * Restore the power save mode to what it once was. * * This will decrement the reference counter and once it hits * zero, it'll restore the powersave state. */ void _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n", __func__, file, line, sc->sc_powersave_refcnt, sc->sc_target_powerstate); if (sc->sc_powersave_refcnt == 0) device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__); else sc->sc_powersave_refcnt--; if (sc->sc_powersave_refcnt == 0 && sc->sc_target_powerstate != sc->sc_cur_powerstate) { sc->sc_cur_powerstate = sc->sc_target_powerstate; ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate); } /* * Adjust the self-gen powerstate if appropriate. */ if (sc->sc_cur_powerstate == HAL_PM_AWAKE && sc->sc_target_selfgen_state != HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, sc->sc_target_selfgen_state); } } /* * Configure the initial HAL configuration values based on bus * specific parameters. * * Some PCI IDs and other information may need tweaking. * * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable * if BT antenna diversity isn't enabled. * * So, let's also figure out how to enable BT diversity for AR9485. */ static void ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config) { /* XXX TODO: only for PCI devices? */ if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) { ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */ ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE; ah_config->ath_hal_min_gainidx = AH_TRUE; ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88; /* XXX low_rssi_thresh */ /* XXX fast_div_bias */ device_printf(sc->sc_dev, "configuring for %s\n", (sc->sc_pci_devinfo & ATH_PCI_CUS198) ? "CUS198" : "CUS230"); } if (sc->sc_pci_devinfo & ATH_PCI_CUS217) device_printf(sc->sc_dev, "CUS217 card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_CUS252) device_printf(sc->sc_dev, "CUS252 card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT) device_printf(sc->sc_dev, "WB335 1-ANT card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT) device_printf(sc->sc_dev, "WB335 2-ANT card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_BT_ANT_DIV) device_printf(sc->sc_dev, "Bluetooth Antenna Diversity card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_KILLER) device_printf(sc->sc_dev, "Killer Wireless card detected\n"); #if 0 /* * Some WB335 cards do not support antenna diversity. Since * we use a hardcoded value for AR9565 instead of using the * EEPROM/OTP data, remove the combining feature from * the HW capabilities bitmap. 
*/ if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) { if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV)) pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB; } if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) { pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV; device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n"); } #endif if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) { ah_config->ath_hal_pcie_waen = 0x0040473b; device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n"); } #if 0 if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) { ah->config.no_pll_pwrsave = true; device_printf(sc->sc_dev, "Disable PLL PowerSave\n"); } #endif } /* * Attempt to fetch the MAC address from the kernel environment. * * Returns 0, macaddr in macaddr if successful; -1 otherwise. */ static int ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr) { char devid_str[32]; int local_mac = 0; char *local_macstr; /* * Fetch from the kenv rather than using hints. * * Hints would be nice but the transition to dynamic * hints/kenv doesn't happen early enough for this * to work reliably (eg on anything embedded.) */ snprintf(devid_str, 32, "hint.%s.%d.macaddr", device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); if ((local_macstr = kern_getenv(devid_str)) != NULL) { uint32_t tmpmac[ETHER_ADDR_LEN]; int count; int i; /* Have a MAC address; should use it */ device_printf(sc->sc_dev, "Overriding MAC address from environment: '%s'\n", local_macstr); /* Extract out the MAC address */ count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x", &tmpmac[0], &tmpmac[1], &tmpmac[2], &tmpmac[3], &tmpmac[4], &tmpmac[5]); if (count == 6) { /* Valid! */ local_mac = 1; for (i = 0; i < ETHER_ADDR_LEN; i++) macaddr[i] = tmpmac[i]; } /* Done! */ freeenv(local_macstr); local_macstr = NULL; } if (local_mac) return (0); return (-1); } #define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) #define HAL_MODE_HT40 \ (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) int ath_attach(u_int16_t devid, struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = NULL; HAL_STATUS status; int error = 0, i; u_int wmodes; int rx_chainmask, tx_chainmask; HAL_OPS_CONFIG ah_config; DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* * Configure the initial configuration data. * * This is stuff that may be needed early during attach * rather than done via configuration calls later. */ bzero(&ah_config, sizeof(ah_config)); ath_setup_hal_config(sc, &ah_config); ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, sc->sc_eepromdata, &ah_config, &status); if (ah == NULL) { device_printf(sc->sc_dev, "unable to attach hardware; HAL status %u\n", status); error = ENXIO; goto bad; } sc->sc_ah = ah; sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ #ifdef ATH_DEBUG sc->sc_debug = ath_debug; #endif /* * Setup the DMA/EDMA functions based on the current * hardware support. * * This is required before the descriptors are allocated. */ if (ath_hal_hasedma(sc->sc_ah)) { sc->sc_isedma = 1; ath_recv_setup_edma(sc); ath_xmit_setup_edma(sc); } else { ath_recv_setup_legacy(sc); ath_xmit_setup_legacy(sc); } if (ath_hal_hasmybeacon(sc->sc_ah)) { sc->sc_do_mybeacon = 1; } /* * Check if the MAC has multi-rate retry support. * We do this by trying to setup a fake extended * descriptor. MAC's that don't have support will * return false w/o doing anything. 
MAC's that do * support it will return true w/o doing anything. */ sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); /* * Check if the device has hardware counters for PHY * errors. If so we need to enable the MIB interrupt * so we can act on stat triggers. */ if (ath_hal_hwphycounters(ah)) sc->sc_needmib = 1; /* * Get the hardware key cache size. */ sc->sc_keymax = ath_hal_keycachesize(ah); if (sc->sc_keymax > ATH_KEYMAX) { device_printf(sc->sc_dev, "Warning, using only %u of %u key cache slots\n", ATH_KEYMAX, sc->sc_keymax); sc->sc_keymax = ATH_KEYMAX; } /* * Reset the key cache since some parts do not * reset the contents on initial power up. */ for (i = 0; i < sc->sc_keymax; i++) ath_hal_keyreset(ah, i); /* * Collect the default channel list. */ error = ath_getchannels(sc); if (error != 0) goto bad; /* * Setup rate tables for all potential media types. */ ath_rate_setup(sc, IEEE80211_MODE_11A); ath_rate_setup(sc, IEEE80211_MODE_11B); ath_rate_setup(sc, IEEE80211_MODE_11G); ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); ath_rate_setup(sc, IEEE80211_MODE_11NA); ath_rate_setup(sc, IEEE80211_MODE_11NG); ath_rate_setup(sc, IEEE80211_MODE_HALF); ath_rate_setup(sc, IEEE80211_MODE_QUARTER); /* NB: setup here so ath_rate_update is happy */ ath_setcurmode(sc, IEEE80211_MODE_11A); /* * Allocate TX descriptors and populate the lists. */ error = ath_desc_alloc(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate TX descriptors: %d\n", error); goto bad; } error = ath_txdma_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate TX descriptors: %d\n", error); goto bad; } /* * Allocate RX descriptors and populate the lists. */ error = ath_rxdma_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate RX descriptors: %d\n", error); goto bad; } callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); ATH_TXBUF_LOCK_INIT(sc); sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc); TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); /* * Allocate hardware transmit queues: one queue for * beacon frames and one data queue for each QoS * priority. Note that the hal handles resetting * these queues at the needed time. 
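 *
 * (Sketch of how the data queues are picked on transmit, assuming
 * the standard net80211 WME tagging; illustrative, not part of
 * this change:
 *
 *	int ac = M_WME_GETAC(m);		// AC stamped on the mbuf
 *	struct ath_txq *txq = sc->sc_ac2q[ac];	// per-AC h/w queue
 *
 * with one beacon queue and one CAB queue set up alongside.)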
* * XXX PS-Poll */ sc->sc_bhalq = ath_beaconq_setup(sc); if (sc->sc_bhalq == (u_int) -1) { device_printf(sc->sc_dev, "unable to setup a beacon xmit queue!\n"); error = EIO; goto bad2; } sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); if (sc->sc_cabq == NULL) { device_printf(sc->sc_dev, "unable to setup CAB xmit queue!\n"); error = EIO; goto bad2; } /* NB: insure BK queue is the lowest priority h/w queue */ if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { device_printf(sc->sc_dev, "unable to setup xmit queue for %s traffic!\n", ieee80211_wme_acnames[WME_AC_BK]); error = EIO; goto bad2; } if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { /* * Not enough hardware tx queues to properly do WME; * just punt and assign them all to the same h/w queue. * We could do a better job of this if, for example, * we allocate queues when we switch from station to * AP mode. */ if (sc->sc_ac2q[WME_AC_VI] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); if (sc->sc_ac2q[WME_AC_BE] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; } /* * Attach the TX completion function. * * The non-EDMA chips may have some special case optimisations; * this method gives everyone a chance to attach cleanly. */ sc->sc_tx.xmit_attach_comp_func(sc); /* * Setup rate control. Some rate control modules * call back to change the antenna state so expose * the necessary entry points. * XXX maybe belongs in struct ath_ratectrl? */ sc->sc_setdefantenna = ath_setdefantenna; sc->sc_rc = ath_rate_attach(sc); if (sc->sc_rc == NULL) { error = EIO; goto bad2; } /* Attach DFS module */ if (! ath_dfs_attach(sc)) { device_printf(sc->sc_dev, "%s: unable to attach DFS\n", __func__); error = EIO; goto bad2; } /* Attach spectral module */ if (ath_spectral_attach(sc) < 0) { device_printf(sc->sc_dev, "%s: unable to attach spectral\n", __func__); error = EIO; goto bad2; } /* Attach bluetooth coexistence module */ if (ath_btcoex_attach(sc) < 0) { device_printf(sc->sc_dev, "%s: unable to attach bluetooth coexistence\n", __func__); error = EIO; goto bad2; } /* Attach LNA diversity module */ if (ath_lna_div_attach(sc) < 0) { device_printf(sc->sc_dev, "%s: unable to attach LNA diversity\n", __func__); error = EIO; goto bad2; } /* Start DFS processing tasklet */ TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); /* Configure LED state */ sc->sc_blinking = 0; sc->sc_ledstate = 1; sc->sc_ledon = 0; /* low true */ sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ callout_init(&sc->sc_ledtimer, 1); /* * Don't setup hardware-based blinking. * * Although some NICs may have this configured in the * default reset register values, the user may wish * to alter which pins have which function. * * The reference driver attaches the MAC network LED to GPIO1 and * the MAC power LED to GPIO2. However, the DWA-552 cardbus * NIC has these reversed. */ sc->sc_hardled = (1 == 0); sc->sc_led_net_pin = -1; sc->sc_led_pwr_pin = -1; /* * Auto-enable soft led processing for IBM cards and for * 5211 minipci cards. Users can also manually enable/disable * support with a sysctl.
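 * (For example, something like "sysctl dev.ath.0.softled=1"; the
 * exact sysctl name here is an assumption based on the driver's
 * per-device sysctl tree.)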
*/ sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); ath_led_config(sc); ath_hal_setledstate(ah, HAL_LED_INIT); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ #ifndef ATH_ENABLE_11N | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_TXFRAG /* handle tx frags */ #ifdef ATH_ENABLE_DFS | IEEE80211_C_DFS /* Enable radar detection */ #endif | IEEE80211_C_PMGT /* Station side power mgmt */ | IEEE80211_C_SWSLEEP ; /* * Query the hal to figure out h/w crypto support. */ if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; /* * Check if h/w does the MIC and/or whether the * separate key cache entries are required to * handle both tx+rx MIC keys. */ if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; /* * If the h/w supports storing tx+rx MIC keys * in one cache slot automatically enable use. */ if (ath_hal_hastkipsplit(ah) || !ath_hal_settkipsplit(ah, AH_FALSE)) sc->sc_splitmic = 1; /* * If the h/w can do TKIP MIC together with WME then * we use it; otherwise we force the MIC to be done * in software by the net80211 layer. */ if (ath_hal_haswmetkipmic(ah)) sc->sc_wmetkipmic = 1; } sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); /* * Check for multicast key search support. */ if (ath_hal_hasmcastkeysearch(sc->sc_ah) && !ath_hal_getmcastkeysearch(sc->sc_ah)) { ath_hal_setmcastkeysearch(sc->sc_ah, 1); } sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); /* * Mark key cache slots associated with global keys * as in use. If we knew TKIP was not to be used we * could leave the +32, +64, and +32+64 slots free. */ for (i = 0; i < IEEE80211_WEP_NKID; i++) { setbit(sc->sc_keymap, i); setbit(sc->sc_keymap, i+64); if (sc->sc_splitmic) { setbit(sc->sc_keymap, i+32); setbit(sc->sc_keymap, i+32+64); } } /* * TPC support can be done either with a global cap or * per-packet support. The latter is not available on * all parts. We're a bit pedantic here as all parts * support a global cap. */ if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) ic->ic_caps |= IEEE80211_C_TXPMGT; /* * Mark WME capability only if we have sufficient * hardware queues to do proper priority scheduling. */ if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) ic->ic_caps |= IEEE80211_C_WME; /* * Check for misc other capabilities. 
*/ if (ath_hal_hasbursting(ah)) ic->ic_caps |= IEEE80211_C_BURST; sc->sc_hasbmask = ath_hal_hasbssidmask(ah); sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); /* XXX TODO: just make this a "store tx/rx timestamp length" operation */ if (ath_hal_get_rx_tsf_prec(ah, &i)) { if (i == 32) { sc->sc_rxtsf32 = 1; } if (bootverbose) device_printf(sc->sc_dev, "RX timestamp: %d bits\n", i); } if (ath_hal_get_tx_tsf_prec(ah, &i)) { if (bootverbose) device_printf(sc->sc_dev, "TX timestamp: %d bits\n", i); } sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah); sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah); sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah); if (ath_hal_hasfastframes(ah)) ic->ic_caps |= IEEE80211_C_FF; wmodes = ath_hal_getwirelessmodes(ah); if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) ic->ic_caps |= IEEE80211_C_TURBOP; #ifdef IEEE80211_SUPPORT_TDMA if (ath_hal_macversion(ah) > 0x78) { ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ ic->ic_tdma_update = ath_tdma_update; } #endif /* * TODO: enforce that at least this many frames are available * in the txbuf list before allowing data frames (raw or * otherwise) to be transmitted. */ sc->sc_txq_data_minfree = 10; + /* - * Leave this as default to maintain legacy behaviour. - * Shortening the cabq/mcastq may end up causing some - * undesirable behaviour. + * Shorten this to 64 packets, or 1/4 ath_txbuf, whichever + * is smaller. + * + * Anything bigger can potentially see the cabq consume + * almost all buffers, starving everything else, only to + * see most fail to transmit in the given beacon interval. */ - sc->sc_txq_mcastq_maxdepth = ath_txbuf; + sc->sc_txq_mcastq_maxdepth = MIN(64, ath_txbuf / 4); /* * How deep can the node software TX queue get whilst it's asleep. */ sc->sc_txq_node_psq_maxdepth = 16; /* - * Default the maximum queue depth for a given node - * to 1/4'th the TX buffers, or 64, whichever - * is larger. + * Default the maximum node queue depth to 1/4'th the TX buffers, + * or 64, whichever is smaller. */ - sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4); + sc->sc_txq_node_maxdepth = MIN(64, ath_txbuf / 4); /* Enable CABQ by default */ sc->sc_cabq_enable = 1; /* * Allow the TX and RX chainmasks to be overridden by * environment variables and/or device.hints. * * This must be done early - before the hardware is * calibrated or before the 802.11n stream calculation * is done. */ if (resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "rx_chainmask", &rx_chainmask) == 0) { device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", rx_chainmask); (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); } if (resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "tx_chainmask", &tx_chainmask) == 0) { device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", tx_chainmask); (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); } /* * Query the TX/RX chainmask configuration. * * This is only relevant for 11n devices. */ ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); /* * Disable MRR with protected frames by default. * Only 802.11n series NICs can handle this. */ sc->sc_mrrprot = 0; /* XXX should be a capability */ /* * Query the enterprise mode information from the HAL.
*/ if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0, &sc->sc_ent_cfg) == HAL_OK) sc->sc_use_ent = 1; #ifdef ATH_ENABLE_11N /* * Query HT capabilities */ if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { uint32_t rxs, txs; uint32_t ldpc; device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); sc->sc_mrrprot = 1; /* XXX should be a capability */ ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ /* * Enable short-GI for HT20 only if the hardware * advertises support. * Notably, anything earlier than the AR9287 doesn't. */ if ((ath_hal_getcapability(ah, HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && (wmodes & HAL_MODE_HT20)) { device_printf(sc->sc_dev, "[HT] enabling short-GI in 20MHz mode\n"); ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; } if (wmodes & HAL_MODE_HT40) ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 | IEEE80211_HTCAP_SHORTGI40; /* * TX/RX streams need to be taken into account when * negotiating which MCS rates it'll receive and * what MCS rates are available for TX. */ (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); ic->ic_txstream = txs; ic->ic_rxstream = rxs; /* * Setup TX and RX STBC based on what the HAL allows and * the currently configured chainmask set. * Ie - don't enable STBC TX if only one chain is enabled. * STBC RX is fine on a single RX chain; it just won't * provide any real benefit. */ if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0, NULL) == HAL_OK) { sc->sc_rx_stbc = 1; device_printf(sc->sc_dev, "[HT] 1 stream STBC receive enabled\n"); ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM; } if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0, NULL) == HAL_OK) { sc->sc_tx_stbc = 1; device_printf(sc->sc_dev, "[HT] 1 stream STBC transmit enabled\n"); ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC; } (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, &sc->sc_rts_aggr_limit); if (sc->sc_rts_aggr_limit != (64 * 1024)) device_printf(sc->sc_dev, "[HT] RTS aggregates limited to %d KiB\n", sc->sc_rts_aggr_limit / 1024); /* * LDPC */ if ((ath_hal_getcapability(ah, HAL_CAP_LDPC, 0, &ldpc)) == HAL_OK && (ldpc == 1)) { sc->sc_has_ldpc = 1; device_printf(sc->sc_dev, "[HT] LDPC transmit/receive enabled\n"); ic->ic_htcaps |= IEEE80211_HTCAP_LDPC | IEEE80211_HTC_TXLDPC; } device_printf(sc->sc_dev, "[HT] %d RX streams; %d TX streams\n", rxs, txs); } #endif /* * Initial aggregation settings. */ sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH; sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH; sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; sc->sc_aggr_limit = ATH_AGGR_MAXSIZE; sc->sc_delim_min_pad = 0; /* * Check if the hardware requires PCI register serialisation. * Some of the Owl based MACs require this. */ if (mp_ncpus > 1 && ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 0, NULL) == HAL_OK) { sc->sc_ah->ah_config.ah_serialise_reg_war = 1; device_printf(sc->sc_dev, "Enabling register serialisation\n"); } /* * Initialise the deferred completed RX buffer list. */ TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]); TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]); /* * Indicate we need the 802.11 header padded to a * 32-bit boundary for 4-address and QoS frames. 
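 * (For example, a QoS data header is 26 bytes and a 4-address header is 30; two pad bytes keep the payload 32-bit aligned in both cases.)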
*/ ic->ic_flags |= IEEE80211_F_DATAPAD; /* * Query the hal about antenna support. */ sc->sc_defant = ath_hal_getdefantenna(ah); /* * Not all chips have the VEOL support we want to * use with IBSS beacons; check here for it. */ sc->sc_hasveol = ath_hal_hasveol(ah); /* get mac address from kenv first, then hardware */ if (ath_fetch_mac_kenv(sc, ic->ic_macaddr) == 0) { /* Tell the HAL now about the new MAC */ ath_hal_setmac(ah, ic->ic_macaddr); } else { ath_hal_getmac(ah, ic->ic_macaddr); } if (sc->sc_hasbmask) ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); /* NB: used to size node table key mapping array */ ic->ic_max_keyix = sc->sc_keymax; /* call MI attach routine. */ ieee80211_ifattach(ic); ic->ic_setregdomain = ath_setregdomain; ic->ic_getradiocaps = ath_getradiocaps; sc->sc_opmode = HAL_M_STA; /* override default methods */ ic->ic_ioctl = ath_ioctl; ic->ic_parent = ath_parent; ic->ic_transmit = ath_transmit; ic->ic_newassoc = ath_newassoc; ic->ic_updateslot = ath_updateslot; ic->ic_wme.wme_update = ath_wme_update; ic->ic_vap_create = ath_vap_create; ic->ic_vap_delete = ath_vap_delete; ic->ic_raw_xmit = ath_raw_xmit; ic->ic_update_mcast = ath_update_mcast; ic->ic_update_promisc = ath_update_promisc; ic->ic_node_alloc = ath_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = ath_node_free; sc->sc_node_cleanup = ic->ic_node_cleanup; ic->ic_node_cleanup = ath_node_cleanup; ic->ic_node_getsignal = ath_node_getsignal; ic->ic_scan_start = ath_scan_start; ic->ic_scan_end = ath_scan_end; ic->ic_set_channel = ath_set_channel; #ifdef ATH_ENABLE_11N /* 802.11n specific - but just override anyway */ sc->sc_addba_request = ic->ic_addba_request; sc->sc_addba_response = ic->ic_addba_response; sc->sc_addba_stop = ic->ic_addba_stop; sc->sc_bar_response = ic->ic_bar_response; sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; ic->ic_addba_request = ath_addba_request; ic->ic_addba_response = ath_addba_response; ic->ic_addba_response_timeout = ath_addba_response_timeout; ic->ic_addba_stop = ath_addba_stop; ic->ic_bar_response = ath_bar_response; ic->ic_update_chw = ath_update_chw; #endif /* ATH_ENABLE_11N */ #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT /* * There's one vendor bitmap entry in the RX radiotap * header; make sure that's taken into account. */ ieee80211_radiotap_attachv(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, ATH_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, ATH_RX_RADIOTAP_PRESENT); #else /* * No vendor bitmap/extensions are present. */ ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), ATH_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), ATH_RX_RADIOTAP_PRESENT); #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ /* * Setup the ALQ logging if required */ #ifdef ATH_DEBUG_ALQ if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev)); if_ath_alq_setcfg(&sc->sc_alq, sc->sc_ah->ah_macVersion, sc->sc_ah->ah_macRev, sc->sc_ah->ah_phyRev, sc->sc_ah->ah_magic); #endif /* * Setup dynamic sysctl's now that country code and * regdomain are available from the hal. */ ath_sysctlattach(sc); ath_sysctl_stats_attach(sc); ath_sysctl_hal_attach(sc); if (bootverbose) ieee80211_announce(ic); ath_announce(sc); /* * Put it to sleep for now. 
*/ ATH_LOCK(sc); ath_power_setpower(sc, HAL_PM_FULL_SLEEP, 1); ATH_UNLOCK(sc); return 0; bad2: ath_tx_cleanup(sc); ath_desc_free(sc); ath_txdma_teardown(sc); ath_rxdma_teardown(sc); bad: if (ah) ath_hal_detach(ah); sc->sc_invalid = 1; return error; } int ath_detach(struct ath_softc *sc) { /* * NB: the order of these is important: * o stop the chip so no more interrupts will fire * o call the 802.11 layer before detaching the hal to * ensure callbacks into the driver to delete global * key cache entries can be handled * o free the taskqueue which drains any pending tasks * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ /* * XXX Wake the hardware up first. ath_stop() will still * wake it up first, but I'd rather do it here just to * ensure it's awake. */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); /* * Stop things cleanly. */ ath_stop(sc); ATH_UNLOCK(sc); ieee80211_ifdetach(&sc->sc_ic); taskqueue_free(sc->sc_tq); #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->detach(sc->sc_tx99); #endif ath_rate_detach(sc->sc_rc); #ifdef ATH_DEBUG_ALQ if_ath_alq_tidyup(&sc->sc_alq); #endif ath_lna_div_detach(sc); ath_btcoex_detach(sc); ath_spectral_detach(sc); ath_dfs_detach(sc); ath_desc_free(sc); ath_txdma_teardown(sc); ath_rxdma_teardown(sc); ath_tx_cleanup(sc); ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ return 0; } /* * MAC address handling for multiple BSS on the same radio. * The first vap uses the MAC address from the EEPROM. For * subsequent vap's we set the U/L bit (bit 1) in the MAC * address and use the next six bits as an index. */ static void assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) { int i; if (clone && sc->sc_hasbmask) { /* NB: we only do this if h/w supports multiple bssid */ for (i = 0; i < 8; i++) if ((sc->sc_bssidmask & (1<<i)) == 0) break; if (i != 0) mac[0] |= (i << 2)|0x2; } else i = 0; sc->sc_bssidmask |= 1<<i; sc->sc_hwbssidmask[0] &= ~mac[0]; if (i == 0) sc->sc_nbssid0++; } static void reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) { int i = mac[0] >> 2; uint8_t mask; if (i != 0 || --sc->sc_nbssid0 == 0) { sc->sc_bssidmask &= ~(1<<i); /* XXX not right for default */ mask = 0xff & ~(sc->sc_bssidmask & (1<<i)); sc->sc_hwbssidmask[0] |= mask; } } /* * Assign a beacon xmit slot. We try to space out * assignments so when beacons are staggered the * traffic coming out of the cab q has maximal time * to go out before the next beacon is scheduled.
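 * (Illustrative: if only slot 1 is in use, assign_bslot() below returns slot 3, the first slot with both neighbours free; slots 0 and 2 each adjoin slot 1.)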
*/ static int assign_bslot(struct ath_softc *sc) { u_int slot, free; free = 0; for (slot = 0; slot < ATH_BCBUF; slot++) if (sc->sc_bslot[slot] == NULL) { if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) return slot; free = slot; /* NB: keep looking for a double slot */ } return free; } static struct ieee80211vap * ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac0[IEEE80211_ADDR_LEN]) { struct ath_softc *sc = ic->ic_softc; struct ath_vap *avp; struct ieee80211vap *vap; uint8_t mac[IEEE80211_ADDR_LEN]; int needbeacon, error; enum ieee80211_opmode ic_opmode; avp = malloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO); needbeacon = 0; IEEE80211_ADDR_COPY(mac, mac0); ATH_LOCK(sc); ic_opmode = opmode; /* default to opmode of new vap */ switch (opmode) { case IEEE80211_M_STA: if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ device_printf(sc->sc_dev, "only 1 sta vap supported\n"); goto bad; } if (sc->sc_nvaps) { /* * With multiple vaps we must fall back * to s/w beacon miss handling. */ flags |= IEEE80211_CLONE_NOBEACONS; } if (flags & IEEE80211_CLONE_NOBEACONS) { /* * Station mode w/o beacons is implemented w/ AP mode. */ ic_opmode = IEEE80211_M_HOSTAP; } break; case IEEE80211_M_IBSS: if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ device_printf(sc->sc_dev, "only 1 ibss vap supported\n"); goto bad; } needbeacon = 1; break; case IEEE80211_M_AHDEMO: #ifdef IEEE80211_SUPPORT_TDMA if (flags & IEEE80211_CLONE_TDMA) { if (sc->sc_nvaps != 0) { device_printf(sc->sc_dev, "only 1 tdma vap supported\n"); goto bad; } needbeacon = 1; flags |= IEEE80211_CLONE_NOBEACONS; } /* fall thru... */ #endif case IEEE80211_M_MONITOR: if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { /* * Adopt existing mode. Adding a monitor or ahdemo * vap to an existing configuration is of dubious * value but should be ok. */ /* XXX not right for monitor mode */ ic_opmode = ic->ic_opmode; } break; case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: needbeacon = 1; break; case IEEE80211_M_WDS: if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { device_printf(sc->sc_dev, "wds not supported in sta mode\n"); goto bad; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. */ flags &= ~IEEE80211_CLONE_BSSID; if (sc->sc_nvaps == 0) ic_opmode = IEEE80211_M_HOSTAP; else ic_opmode = ic->ic_opmode; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); goto bad; } /* * Check that a beacon buffer is available; the code below assumes it. */ if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) { device_printf(sc->sc_dev, "no beacon buffer available\n"); goto bad; } /* STA, AHDEMO?
*/ if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); } vap = &avp->av_vap; /* XXX can't hold mutex across if_alloc */ ATH_UNLOCK(sc); error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); ATH_LOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: error %d creating vap\n", __func__, error); goto bad2; } /* h/w crypto support */ vap->iv_key_alloc = ath_key_alloc; vap->iv_key_delete = ath_key_delete; vap->iv_key_set = ath_key_set; vap->iv_key_update_begin = ath_key_update_begin; vap->iv_key_update_end = ath_key_update_end; /* override various methods */ avp->av_recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = ath_recv_mgmt; vap->iv_reset = ath_reset_vap; vap->iv_update_beacon = ath_beacon_update; avp->av_newstate = vap->iv_newstate; vap->iv_newstate = ath_newstate; avp->av_bmiss = vap->iv_bmiss; vap->iv_bmiss = ath_bmiss_vap; avp->av_node_ps = vap->iv_node_ps; vap->iv_node_ps = ath_node_powersave; avp->av_set_tim = vap->iv_set_tim; vap->iv_set_tim = ath_node_set_tim; avp->av_recv_pspoll = vap->iv_recv_pspoll; vap->iv_recv_pspoll = ath_node_recv_pspoll; /* Set default parameters */ /* * Anything earlier than some AR9300 series MACs doesn't * support a smaller MPDU density. */ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; /* * All NICs can handle the maximum size, however * AR5416 based MACs can only TX aggregates w/ RTS * protection when the total aggregate size is <= 8k. * However, for now that's enforced by the TX path. */ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; vap->iv_ampdu_limit = IEEE80211_HTCAP_MAXRXAMPDU_64K; avp->av_bslot = -1; if (needbeacon) { /* * Allocate beacon state and setup the q for buffered * multicast frames. We know a beacon buffer is * available because we checked above. */ avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { /* * Assign the vap to a beacon xmit slot. As above * this cannot fail to find a free one. */ avp->av_bslot = assign_bslot(sc); KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, ("beacon slot %u not empty", avp->av_bslot)); sc->sc_bslot[avp->av_bslot] = vap; sc->sc_nbcnvaps++; } if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { /* * Multiple vaps are to transmit beacons and we * have h/w support for TSF adjusting; enable * use of staggered beacons. */ sc->sc_stagbeacons = 1; } ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); } ic->ic_opmode = ic_opmode; if (opmode != IEEE80211_M_WDS) { sc->sc_nvaps++; if (opmode == IEEE80211_M_STA) sc->sc_nstavaps++; if (opmode == IEEE80211_M_MBSS) sc->sc_nmeshvaps++; } switch (ic_opmode) { case IEEE80211_M_IBSS: sc->sc_opmode = HAL_M_IBSS; break; case IEEE80211_M_STA: sc->sc_opmode = HAL_M_STA; break; case IEEE80211_M_AHDEMO: #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { sc->sc_tdma = 1; /* NB: disable tsf adjust */ sc->sc_stagbeacons = 0; } /* * NB: adhoc demo mode is a pseudo mode; to the hal it's * just ap mode. */ /* fall thru... */ #endif case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: sc->sc_opmode = HAL_M_HOSTAP; break; case IEEE80211_M_MONITOR: sc->sc_opmode = HAL_M_MONITOR; break; default: /* XXX should not happen */ break; } if (sc->sc_hastsfadd) { /* * Configure whether or not TSF adjust should be done.
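 * (Staggered beacons offset each slot's TBTT, so the hardware is asked to adjust the timestamp it inserts into those beacons; hence this is tied to sc_stagbeacons below.)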
*/ ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); } if (flags & IEEE80211_CLONE_NOBEACONS) { /* * Enable s/w beacon miss handling. */ sc->sc_swbmiss = 1; } ATH_UNLOCK(sc); /* complete setup */ ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status, mac); return vap; bad2: reclaim_address(sc, mac); ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); bad: free(avp, M_80211_VAP); ATH_UNLOCK(sc); return NULL; } static void ath_vap_delete(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; struct ath_vap *avp = ATH_VAP(vap); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); if (sc->sc_running) { /* * Quiesce the hardware while we remove the vap. In * particular we need to reclaim all references to * the vap state by any frames pending on the tx queues. */ ath_hal_intrset(ah, 0); /* disable interrupts */ /* XXX Do all frames from all vaps/nodes need draining here? */ ath_stoprecv(sc, 1); /* stop recv side */ ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ } /* .. leave the hardware awake for now. */ ieee80211_vap_detach(vap); /* * XXX Danger Will Robinson! Danger! * * Because ieee80211_vap_detach() can queue a frame (the station * disassociate message?) after we've drained the TXQ and * flushed the software TXQ, we will end up with a frame queued * to a node whose vap is about to be freed. * * To work around this, flush the hardware/software again. * This may be racy - the ath task may be running and the packet * may be being scheduled between sw->hw txq. Tsk. * * TODO: figure out why a new node gets allocated somewhere around * here (after the ath_tx_swq() call; and after an ath_stop() * call!) */ ath_draintxq(sc, ATH_RESET_DEFAULT); ATH_LOCK(sc); /* * Reclaim beacon state. Note this must be done before * the vap instance is reclaimed as we may have a reference * to it in the buffer for the beacon frame. */ if (avp->av_bcbuf != NULL) { if (avp->av_bslot != -1) { sc->sc_bslot[avp->av_bslot] = NULL; sc->sc_nbcnvaps--; } ath_beacon_return(sc, avp->av_bcbuf); avp->av_bcbuf = NULL; if (sc->sc_nbcnvaps == 0) { sc->sc_stagbeacons = 0; if (sc->sc_hastsfadd) ath_hal_settsfadjust(sc->sc_ah, 0); } /* * Reclaim any pending mcast frames for the vap. */ ath_tx_draintxq(sc, &avp->av_mcastq); } /* * Update bookkeeping. */ if (vap->iv_opmode == IEEE80211_M_STA) { sc->sc_nstavaps--; if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) sc->sc_swbmiss = 0; } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) { reclaim_address(sc, vap->iv_myaddr); ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); if (vap->iv_opmode == IEEE80211_M_MBSS) sc->sc_nmeshvaps--; } if (vap->iv_opmode != IEEE80211_M_WDS) sc->sc_nvaps--; #ifdef IEEE80211_SUPPORT_TDMA /* TDMA operation ceases when the last vap is destroyed */ if (sc->sc_tdma && sc->sc_nvaps == 0) { sc->sc_tdma = 0; sc->sc_swbmiss = 0; } #endif free(avp, M_80211_VAP); if (sc->sc_running) { /* * Restart rx+tx machines if still running (RUNNING will * be reset if we just destroyed the last vap). */ if (ath_startrecv(sc) != 0) device_printf(sc->sc_dev, "%s: unable to restart recv logic\n", __func__); if (sc->sc_beacons) { /* restart beacons */ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) ath_tdma_config(sc, NULL); else #endif ath_beacon_config(sc, NULL); } ath_hal_intrset(ah, sc->sc_imask); } /* Ok, let the hardware sleep.
*/ ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } void ath_suspend(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; sc->sc_resume_up = ic->ic_nrunning != 0; ieee80211_suspend_all(ic); /* * NB: don't worry about putting the chip in low power * mode; pci will power off our socket on suspend and * CardBus detaches the device. * * XXX TODO: well, that's great, except for non-cardbus * devices! */ /* * XXX This doesn't wait until all pending taskqueue * items and parallel transmit/receive/other threads * have finished! */ ath_hal_intrset(sc->sc_ah, 0); taskqueue_block(sc->sc_tq); ATH_LOCK(sc); callout_stop(&sc->sc_cal_ch); ATH_UNLOCK(sc); /* * XXX ensure sc_invalid is 1 */ /* Disable the PCIe PHY, complete with workarounds */ ath_hal_enablepcie(sc->sc_ah, 1, 1); } /* * Reset the key cache since some parts do not reset the * contents on resume. First we clear all entries, then * re-load keys that the 802.11 layer assumes are setup * in h/w. */ static void ath_reset_keycache(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; int i; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); for (i = 0; i < sc->sc_keymax; i++) ath_hal_keyreset(ah, i); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ieee80211_crypto_reload_keys(ic); } /* * Fetch the current chainmask configuration based on the current * operating channel and options. */ static void ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan) { /* * Set TX chainmask to the currently configured chainmask; * the TX chainmask depends upon the current operating mode. */ sc->sc_cur_rxchainmask = sc->sc_rxchainmask; if (IEEE80211_IS_CHAN_HT(chan)) { sc->sc_cur_txchainmask = sc->sc_txchainmask; } else { sc->sc_cur_txchainmask = 1; } DPRINTF(sc, ATH_DEBUG_RESET, "%s: TX chainmask is now 0x%x, RX is now 0x%x\n", __func__, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); } void ath_resume(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; ath_hal_enablepcie(ah, 0, 0); /* * Must reset the chip before we reload the * keycache as we were powered down on suspend. */ ath_update_chainmasks(sc, sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); /* Ensure we set the current power state to on */ ATH_LOCK(sc); ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); ATH_UNLOCK(sc); ath_hal_reset(ah, sc->sc_opmode, sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, AH_FALSE, HAL_RESET_NORMAL, &status); ath_reset_keycache(sc); ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, ic->ic_curchan); /* Let spectral at it in case spectral is enabled */ ath_spectral_enable(sc, ic->ic_curchan); /* * Let bluetooth coexistence at it in case it's needed for this channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips that * support it. */ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); /* Restore the LED configuration */ ath_led_config(sc); ath_hal_setledstate(ah, HAL_LED_INIT); if (sc->sc_resume_up) ieee80211_resume_all(ic); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); /* XXX beacons ?
*/ } void ath_shutdown(struct ath_softc *sc) { ATH_LOCK(sc); ath_stop(sc); ATH_UNLOCK(sc); /* NB: no point powering down chip as we're about to reboot */ } /* * Interrupt handler. Most of the actual processing is deferred. */ void ath_intr(void *arg) { struct ath_softc *sc = arg; struct ath_hal *ah = sc->sc_ah; HAL_INT status = 0; uint32_t txqs; /* * If we're inside a reset path, just print a warning and * clear the ISR. The reset routine will finish it for us. */ ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt) { HAL_INT status; ath_hal_getisr(ah, &status); /* clear ISR */ ath_hal_intrset(ah, 0); /* disable further intr's */ DPRINTF(sc, ATH_DEBUG_ANY, "%s: in reset, ignoring: status=0x%x\n", __func__, status); ATH_PCU_UNLOCK(sc); return; } if (sc->sc_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); ATH_PCU_UNLOCK(sc); return; } if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ ATH_PCU_UNLOCK(sc); return; } ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); if (sc->sc_ic.ic_nrunning == 0 && sc->sc_running == 0) { HAL_INT status; DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_nrunning %d sc_running %d\n", __func__, sc->sc_ic.ic_nrunning, sc->sc_running); ath_hal_getisr(ah, &status); /* clear ISR */ ath_hal_intrset(ah, 0); /* disable further intr's */ ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return; } /* * Figure out the reason(s) for the interrupt. Note * that the hal returns a pseudo-ISR that may include * bits we haven't explicitly enabled so we mask the * value to ensure we only process bits we requested. */ ath_hal_getisr(ah, &status); /* NB: clears ISR too */ DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status); #ifdef ATH_DEBUG_ALQ if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate, ah->ah_syncstate); #endif /* ATH_DEBUG_ALQ */ #ifdef ATH_KTR_INTR_DEBUG ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5, "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", ah->ah_intrstate[0], ah->ah_intrstate[1], ah->ah_intrstate[2], ah->ah_intrstate[3], ah->ah_intrstate[6]); #endif /* Squirrel away SYNC interrupt debugging */ if (ah->ah_syncstate != 0) { int i; for (i = 0; i < 32; i++) if (ah->ah_syncstate & (1 << i)) sc->sc_intr_stats.sync_intr[i]++; } status &= sc->sc_imask; /* discard unasked for bits */ /* Short-circuit un-handled interrupts */ if (status == 0x0) { ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return; } /* * Take a note that we're inside the interrupt handler, so * the reset routines know to wait. */ sc->sc_intr_cnt++; ATH_PCU_UNLOCK(sc); /* * Handle the interrupt. We won't run concurrent with the reset * or channel change routines as they'll wait for sc_intr_cnt * to be 0 before continuing. */ if (status & HAL_INT_FATAL) { sc->sc_stats.ast_hardware++; ath_hal_intrset(ah, 0); /* disable intr's until reset */ taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); } else { if (status & HAL_INT_SWBA) { /* * Software beacon alert--time to send a beacon. * Handle beacon transmission directly; deferring * this is too slow to meet timing constraints * under load.
*/ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) { if (sc->sc_tdmaswba == 0) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); ath_tdma_beacon_send(sc, vap); sc->sc_tdmaswba = vap->iv_tdma->tdma_bintval; } else sc->sc_tdmaswba--; } else #endif { ath_beacon_proc(sc, 0); #ifdef IEEE80211_SUPPORT_SUPERG /* * Schedule the rx taskq in case there's no * traffic so any frames held on the staging * queue are aged and potentially flushed. */ sc->sc_rx.recv_sched(sc, 1); #endif } } if (status & HAL_INT_RXEOL) { int imask; ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); if (! sc->sc_isedma) { ATH_PCU_LOCK(sc); /* * NB: the hardware should re-read the link when * RXE bit is written, but it doesn't work at * least on older hardware revs. */ sc->sc_stats.ast_rxeol++; /* * Disable RXEOL/RXORN - prevent an interrupt * storm until the PCU logic can be reset. * In case the interface is reset some other * way before "sc_kickpcu" is called, don't * modify sc_imask - that way if it is reset * by a call to ath_reset() somehow, the * interrupt mask will be correctly reprogrammed. */ imask = sc->sc_imask; imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); ath_hal_intrset(ah, imask); /* * Only blank sc_rxlink if we've not yet kicked * the PCU. * * This isn't entirely correct - the correct solution * would be to have a PCU lock and engage that for * the duration of the PCU fiddling; which would include * running the RX process. Otherwise we could end up * messing up the RX descriptor chain and making the * RX desc list much shorter. */ if (! sc->sc_kickpcu) sc->sc_rxlink = NULL; sc->sc_kickpcu = 1; ATH_PCU_UNLOCK(sc); } /* * Enqueue an RX proc to handle whatever * is in the RX queue. * This will then kick the PCU if required. */ sc->sc_rx.recv_sched(sc, 1); } if (status & HAL_INT_TXURN) { sc->sc_stats.ast_txurn++; /* bump tx trigger level */ ath_hal_updatetxtriglevel(ah, AH_TRUE); } /* * Handle both the legacy and RX EDMA interrupt bits. * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. */ if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { sc->sc_stats.ast_rx_intr++; sc->sc_rx.recv_sched(sc, 1); } if (status & HAL_INT_TX) { sc->sc_stats.ast_tx_intr++; /* * Grab all the currently set bits in the HAL txq bitmap * and blank them. This is the only place we should be * doing this. */ if (! sc->sc_isedma) { ATH_PCU_LOCK(sc); txqs = 0xffffffff; ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", txqs, sc->sc_txq_active, sc->sc_txq_active | txqs); sc->sc_txq_active |= txqs; ATH_PCU_UNLOCK(sc); } taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); } if (status & HAL_INT_BMISS) { sc->sc_stats.ast_bmiss++; taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); } if (status & HAL_INT_GTT) sc->sc_stats.ast_tx_timeout++; if (status & HAL_INT_CST) sc->sc_stats.ast_tx_cst++; if (status & HAL_INT_MIB) { sc->sc_stats.ast_mib++; ATH_PCU_LOCK(sc); /* * Disable interrupts until we service the MIB * interrupt; otherwise it will continue to fire. */ ath_hal_intrset(ah, 0); /* * Let the hal handle the event. We assume it will * clear whatever condition caused the interrupt. */ ath_hal_mibevent(ah, &sc->sc_halstats); /* * Don't reset the interrupt if we've just * kicked the PCU, or we may get a nested * RXEOL before the rxproc has had a chance * to run. 
*/ if (sc->sc_kickpcu == 0) ath_hal_intrset(ah, sc->sc_imask); ATH_PCU_UNLOCK(sc); } if (status & HAL_INT_RXORN) { /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); sc->sc_stats.ast_rxorn++; } if (status & HAL_INT_TSFOOR) { /* out of range beacon - wake the chip up, * but don't modify self-gen frame config */ device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__); sc->sc_syncbeacon = 1; ATH_LOCK(sc); ath_power_setpower(sc, HAL_PM_AWAKE, 0); ATH_UNLOCK(sc); } if (status & HAL_INT_MCI) { ath_btcoex_mci_intr(sc); } } ATH_PCU_LOCK(sc); sc->sc_intr_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } static void ath_fatal_proc(void *arg, int pending) { struct ath_softc *sc = arg; u_int32_t *state; u_int32_t len; void *sp; if (sc->sc_invalid) return; device_printf(sc->sc_dev, "hardware error; resetting\n"); /* * Fatal errors are unrecoverable. Typically these * are caused by DMA errors. Collect h/w state from * the hal so we can diagnose what's going on. */ if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); state = sp; device_printf(sc->sc_dev, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", state[0], state[1], state[2], state[3], state[4], state[5]); } ath_reset(sc, ATH_RESET_NOLOSS); } static void ath_bmiss_vap(struct ieee80211vap *vap) { struct ath_softc *sc = vap->iv_ic->ic_softc; /* * Work around phantom bmiss interrupts by sanity-checking * the time of our last rx'd frame. If it is within the * beacon miss interval then ignore the interrupt. If it's * truly a bmiss we'll get another interrupt soon and that'll * be dispatched up for processing. Note this applies only * for h/w beacon miss events. */ /* * XXX TODO: Just read the TSF during the interrupt path; * that way we don't have to wake up again just to read it * again. */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { u_int64_t lastrx = sc->sc_lastrx; u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); /* XXX should take a locked ref to iv_bss */ u_int bmisstimeout = vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", __func__, (unsigned long long) tsf, (unsigned long long)(tsf - lastrx), (unsigned long long) lastrx, bmisstimeout); if (tsf - lastrx <= bmisstimeout) { sc->sc_stats.ast_bmiss_phantom++; ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return; } } /* * Keep the hardware awake if it's asleep (and leave self-gen * frame config alone) until the next beacon, so we can resync * against the next beacon. * * This handles three common beacon miss cases in STA powersave mode - * (a) the beacon TBTT isn't a multiple of bintval; * (b) the beacon was missed; and * (c) the beacons are being delayed because the AP is busy and * isn't reliably able to meet its TBTT. */ ATH_LOCK(sc); ath_power_setpower(sc, HAL_PM_AWAKE, 0); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: forced awake; force syncbeacon=1\n", __func__); /* * Attempt to force a beacon resync. */ sc->sc_syncbeacon = 1; ATH_VAP(vap)->av_bmiss(vap); } /* XXX this needs a force wakeup!
*/ int ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) { uint32_t rsize; void *sp; if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) return 0; KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); *hangs = *(uint32_t *)sp; return 1; } static void ath_bmiss_proc(void *arg, int pending) { struct ath_softc *sc = arg; uint32_t hangs; DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ath_beacon_miss(sc); /* * Do a reset upon any beacon miss event. * * It may be a non-recognised RX clear hang which needs a reset * to clear. */ if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { ath_reset(sc, ATH_RESET_NOLOSS); device_printf(sc->sc_dev, "bb hang detected (0x%x), resetting\n", hangs); } else { ath_reset(sc, ATH_RESET_NOLOSS); ieee80211_beacon_miss(&sc->sc_ic); } /* Force a beacon resync, in case they've drifted */ sc->sc_syncbeacon = 1; ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } /* * Handle TKIP MIC setup to deal with hardware that doesn't do MIC * calcs together with WME. If necessary disable the crypto * hardware and mark the 802.11 state so keys will be setup * with the MIC work done in software. */ static void ath_settkipmic(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { if (ic->ic_flags & IEEE80211_F_WME) { ath_hal_settkipmic(sc->sc_ah, AH_FALSE); ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; } else { ath_hal_settkipmic(sc->sc_ah, AH_TRUE); ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; } } } static int ath_init(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; ATH_LOCK_ASSERT(sc); /* * Force the sleep state awake. */ ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); /* * Stop anything previously setup. This is safe * whether this is the first time through or not. */ ath_stop(sc); /* * The basic interface to setting the hardware in a good * state is ``reset''. On return the hardware is known to * be powered up and with interrupts disabled. This must * be followed by initialization of the appropriate bits * and then setup of the interrupt mask. */ ath_settkipmic(sc); ath_update_chainmasks(sc, ic->ic_curchan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, HAL_RESET_NORMAL, &status)) { device_printf(sc->sc_dev, "unable to reset hardware; hal status %u\n", status); return (ENODEV); } ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); ath_chan_change(sc, ic->ic_curchan); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, ic->ic_curchan); /* Let spectral at it in case spectral is enabled */ ath_spectral_enable(sc, ic->ic_curchan); /* * Let bluetooth coexistence at it in case it's needed for this channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips that * support it. */ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); /* * Likewise this is set during reset so update * state cached in the driver.
*/ sc->sc_diversity = ath_hal_getdiversity(ah); sc->sc_lastlongcal = ticks; sc->sc_resetcal = 1; sc->sc_lastcalreset = 0; sc->sc_lastani = ticks; sc->sc_lastshortcal = ticks; sc->sc_doresetcal = AH_FALSE; /* * Beacon timers were cleared here; give ath_newstate() * a hint that the beacon timers should be poked when * things transition to the RUN state. */ sc->sc_beacons = 0; /* * Setup the hardware after reset: the key cache * is filled as needed and the receive engine is * set going. Frame transmit is handled entirely * in the frame output path; there's nothing to do * here except setup the interrupt mask. */ if (ath_startrecv(sc) != 0) { device_printf(sc->sc_dev, "unable to start recv logic\n"); ath_power_restore_power_state(sc); return (ENODEV); } /* * Enable interrupts. */ sc->sc_imask = HAL_INT_RX | HAL_INT_TX | HAL_INT_RXORN | HAL_INT_TXURN | HAL_INT_FATAL | HAL_INT_GLOBAL; /* * Enable RX EDMA bits. Note these overlap with * HAL_INT_RX and HAL_INT_RXDESC respectively. */ if (sc->sc_isedma) sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); /* * If we're an EDMA NIC, we don't care about RXEOL. * Writing a new descriptor in will simply restart * RX DMA. */ if (! sc->sc_isedma) sc->sc_imask |= HAL_INT_RXEOL; /* * Enable MCI interrupt for MCI devices. */ if (sc->sc_btcoex_mci) sc->sc_imask |= HAL_INT_MCI; /* * Enable MIB interrupts when there are hardware phy counters. * Note we only do this (at the moment) for station mode. */ if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) sc->sc_imask |= HAL_INT_MIB; /* * XXX add capability for this. * * If we're in STA mode (and maybe IBSS?) then register for * TSFOOR interrupts. */ if (ic->ic_opmode == IEEE80211_M_STA) sc->sc_imask |= HAL_INT_TSFOOR; /* Enable global TX timeout and carrier sense timeout if available */ if (ath_hal_gtxto_supported(ah)) sc->sc_imask |= HAL_INT_GTT; DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", __func__, sc->sc_imask); sc->sc_running = 1; callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); ath_hal_intrset(ah, sc->sc_imask); ath_power_restore_power_state(sc); return (0); } static void ath_stop(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; ATH_LOCK_ASSERT(sc); /* * Wake the hardware up before fiddling with it. */ ath_power_set_power_state(sc, HAL_PM_AWAKE); if (sc->sc_running) { /* * Shutdown the hardware and driver: * reset 802.11 state machine * turn off timers * disable interrupts * turn off the radio * clear transmit machinery * clear receive machinery * drain and release tx queues * reclaim beacon resources * power down hardware * * Note that some of this work is not possible if the * hardware is gone (invalid). */ #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->stop(sc->sc_tx99); #endif callout_stop(&sc->sc_wd_ch); sc->sc_wd_timer = 0; sc->sc_running = 0; if (!sc->sc_invalid) { if (sc->sc_softled) { callout_stop(&sc->sc_ledtimer); ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); sc->sc_blinking = 0; } ath_hal_intrset(ah, 0); } /* XXX we should stop RX regardless of whether it's valid */ if (!sc->sc_invalid) { ath_stoprecv(sc, 1); ath_hal_phydisable(ah); } else sc->sc_rxlink = NULL; ath_draintxq(sc, ATH_RESET_DEFAULT); ath_beacon_free(sc); /* XXX not needed */ } /* And now, restore the current power state */ ath_power_restore_power_state(sc); } /* * Wait until all pending TX/RX has completed. * * This waits until all existing transmit, receive and interrupts * have completed. 
It's assumed that the caller has first * grabbed the reset lock so it doesn't try to do overlapping * chip resets. */ #define MAX_TXRX_ITERATIONS 100 static void ath_txrx_stop_locked(struct ath_softc *sc) { int i = MAX_TXRX_ITERATIONS; ATH_UNLOCK_ASSERT(sc); ATH_PCU_LOCK_ASSERT(sc); /* * Sleep until all the pending operations have completed. * * The caller must ensure that the reset count has been incremented * or the pending operations may continue being queued. */ while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || sc->sc_txstart_cnt || sc->sc_intr_cnt) { if (i <= 0) break; msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", msecs_to_ticks(10)); i--; } if (i <= 0) device_printf(sc->sc_dev, "%s: didn't finish after %d iterations\n", __func__, MAX_TXRX_ITERATIONS); } #undef MAX_TXRX_ITERATIONS #if 0 static void ath_txrx_stop(struct ath_softc *sc) { ATH_UNLOCK_ASSERT(sc); ATH_PCU_UNLOCK_ASSERT(sc); ATH_PCU_LOCK(sc); ath_txrx_stop_locked(sc); ATH_PCU_UNLOCK(sc); } #endif static void ath_txrx_start(struct ath_softc *sc) { taskqueue_unblock(sc->sc_tq); } /* * Grab the reset lock, and wait around until no one else * is trying to do anything with it. * * This is totally horrible but we can't hold this lock for * long enough to do TX/RX or we end up with net80211/ip stack * LORs and eventual deadlock. * * "dowait" signals whether to spin, waiting for the reset * lock count to reach 0. This should (for now) only be used * during the reset path, as the rest of the code may not * be locking-reentrant enough to behave correctly. * * Another, cleaner way should be found to serialise all of * these operations. */ #define MAX_RESET_ITERATIONS 25 static int ath_reset_grablock(struct ath_softc *sc, int dowait) { int w = 0; int i = MAX_RESET_ITERATIONS; ATH_PCU_LOCK_ASSERT(sc); do { if (sc->sc_inreset_cnt == 0) { w = 1; break; } if (dowait == 0) { w = 0; break; } ATH_PCU_UNLOCK(sc); /* * 1 tick is likely not enough time for long calibrations * to complete. So we should wait quite a while. */ pause("ath_reset_grablock", msecs_to_ticks(100)); i--; ATH_PCU_LOCK(sc); } while (i > 0); /* * We always increment the refcounter, regardless * of whether we succeeded in getting it in an exclusive * way. */ sc->sc_inreset_cnt++; if (i <= 0) device_printf(sc->sc_dev, "%s: didn't finish after %d iterations\n", __func__, MAX_RESET_ITERATIONS); if (w == 0) device_printf(sc->sc_dev, "%s: warning, recursive reset path!\n", __func__); return w; } #undef MAX_RESET_ITERATIONS /* * Reset the hardware w/o losing operational state. This is * basically a more efficient way of doing ath_stop, ath_init, * followed by state transitions to the current 802.11 * operational state. Used to recover from various errors and * to reset or reload hardware state. */ int ath_reset(struct ath_softc *sc, ATH_RESET_TYPE reset_type) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; int i; DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ ATH_PCU_UNLOCK_ASSERT(sc); ATH_UNLOCK_ASSERT(sc); /* Try to stop any further TX/RX from occurring */ taskqueue_block(sc->sc_tq); /* * Wake the hardware up. */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); /* * Grab the reset lock before TX/RX is stopped. * * This is needed to ensure that when the TX/RX actually does finish, * no further TX/RX/reset runs in parallel with this. */ if (ath_reset_grablock(sc, 1) == 0) { device_printf(sc->sc_dev, "%s: concurrent reset!
Danger!\n", __func__); } /* disable interrupts */ ath_hal_intrset(ah, 0); /* * Now, ensure that any in progress TX/RX completes before we * continue. */ ath_txrx_stop_locked(sc); ATH_PCU_UNLOCK(sc); /* * Regardless of whether we're doing a no-loss flush or * not, stop the PCU and handle what's in the RX queue. * That way frames aren't dropped which shouldn't be. */ ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); ath_rx_flush(sc); /* * Should now wait for pending TX/RX to complete * and block future ones from occurring. This needs to be * done before the TX queue is drained. */ ath_draintxq(sc, reset_type); /* stop xmit side */ ath_settkipmic(sc); /* configure TKIP MIC handling */ /* NB: indicate channel change so we do a full reset */ ath_update_chainmasks(sc, ic->ic_curchan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, HAL_RESET_NORMAL, &status)) device_printf(sc->sc_dev, "%s: unable to reset hardware; hal status %u\n", __func__, status); sc->sc_diversity = ath_hal_getdiversity(ah); ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, ic->ic_curchan); /* Let spectral at in case spectral is enabled */ ath_spectral_enable(sc, ic->ic_curchan); /* * Let bluetooth coexistence at in case it's needed for this channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips that * support it. */ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); if (ath_startrecv(sc) != 0) /* restart recv */ device_printf(sc->sc_dev, "%s: unable to start recv logic\n", __func__); /* * We may be doing a reset in response to an ioctl * that changes the channel so update any state that * might change as a result. */ ath_chan_change(sc, ic->ic_curchan); if (sc->sc_beacons) { /* restart beacons */ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) ath_tdma_config(sc, NULL); else #endif ath_beacon_config(sc, NULL); } /* * Release the reset lock and re-enable interrupts here. * If an interrupt was being processed in ath_intr(), * it would disable interrupts at this point. So we have * to atomically enable interrupts and decrement the * reset counter - this way ath_intr() doesn't end up * disabling interrupts without a corresponding enable * in the rest or channel change path. * * Grab the TX reference in case we need to transmit. * That way a parallel transmit doesn't. */ ATH_PCU_LOCK(sc); sc->sc_inreset_cnt--; sc->sc_txstart_cnt++; /* XXX only do this if sc_inreset_cnt == 0? */ ath_hal_intrset(ah, sc->sc_imask); ATH_PCU_UNLOCK(sc); /* * TX and RX can be started here. If it were started with * sc_inreset_cnt > 0, the TX and RX path would abort. * Thus if this is a nested call through the reset or * channel change code, TX completion will occur but * RX completion and ath_start / ath_tx_start will not * run. */ /* Restart TX/RX as needed */ ath_txrx_start(sc); /* XXX TODO: we need to hold the tx refcount here! 
*/ /* Restart TX completion and pending TX */ if (reset_type == ATH_RESET_NOLOSS) { for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { ATH_TXQ_LOCK(&sc->sc_txq[i]); ath_txq_restart_dma(sc, &sc->sc_txq[i]); ATH_TXQ_UNLOCK(&sc->sc_txq[i]); ATH_TX_LOCK(sc); ath_txq_sched(sc, &sc->sc_txq[i]); ATH_TX_UNLOCK(sc); } } } ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Handle any frames in the TX queue */ /* * XXX should this be done by the caller, rather than * ath_reset() ? */ ath_tx_kick(sc); /* restart xmit */ return 0; } static int ath_reset_vap(struct ieee80211vap *vap, u_long cmd) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; switch (cmd) { case IEEE80211_IOC_TXPOWER: /* * If per-packet TPC is enabled, then we have nothing * to do; otherwise we need to force the global limit. * All this can happen directly; no need to reset. */ if (!ath_hal_gettpc(ah)) ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); return 0; } /* XXX? Full or NOLOSS? */ return ath_reset(sc, ATH_RESET_FULL); } struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) { struct ath_buf *bf; ATH_TXBUF_LOCK_ASSERT(sc); if (btype == ATH_BUFTYPE_MGMT) bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); else bf = TAILQ_FIRST(&sc->sc_txbuf); if (bf == NULL) { sc->sc_stats.ast_tx_getnobuf++; } else { if (bf->bf_flags & ATH_BUF_BUSY) { sc->sc_stats.ast_tx_getbusybuf++; bf = NULL; } } if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { if (btype == ATH_BUFTYPE_MGMT) TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); else { TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); sc->sc_txbuf_cnt--; /* * This shouldn't happen; however, just to be * safe print a warning and fudge the txbuf * count. */ if (sc->sc_txbuf_cnt < 0) { device_printf(sc->sc_dev, "%s: sc_txbuf_cnt < 0?\n", __func__); sc->sc_txbuf_cnt = 0; } } } else bf = NULL; if (bf == NULL) { /* XXX should check which list, mgmt or otherwise */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, TAILQ_FIRST(&sc->sc_txbuf) == NULL ? "out of xmit buffers" : "xmit buffer busy"); return NULL; } /* XXX TODO: should do this at buffer list initialisation */ /* XXX (then, ensure the buffer has the right flag set) */ bf->bf_flags = 0; if (btype == ATH_BUFTYPE_MGMT) bf->bf_flags |= ATH_BUF_MGMT; else bf->bf_flags &= (~ATH_BUF_MGMT); /* Valid bf here; clear some basic fields */ bf->bf_next = NULL; /* XXX just to be sure */ bf->bf_last = NULL; /* XXX again, just to be sure */ bf->bf_comp = NULL; /* XXX again, just to be sure */ bzero(&bf->bf_state, sizeof(bf->bf_state)); /* * Track the descriptor ID only if doing EDMA */ if (sc->sc_isedma) { bf->bf_descid = sc->sc_txbuf_descid; sc->sc_txbuf_descid++; } return bf; } /* * When retrying a software frame, buffers marked ATH_BUF_BUSY * can't be thrown back on the queue as they could still be * in use by the hardware. * * This duplicates the buffer, or returns NULL. * * The descriptor is also copied but the link pointers and * the DMA segments aren't copied; this frame should thus * be again passed through the descriptor setup/chain routines * so the link is correct. * * The caller must free the buffer using ath_freebuf(). */ struct ath_buf * ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) { struct ath_buf *tbf; tbf = ath_getbuf(sc, (bf->bf_flags & ATH_BUF_MGMT) ? ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); if (tbf == NULL) return NULL; /* XXX failure? Why?
*/ /* Copy basics */ tbf->bf_next = NULL; tbf->bf_nseg = bf->bf_nseg; tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; tbf->bf_status = bf->bf_status; tbf->bf_m = bf->bf_m; tbf->bf_node = bf->bf_node; KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); /* will be setup by the chain/setup function */ tbf->bf_lastds = NULL; /* for now, last == self */ tbf->bf_last = tbf; tbf->bf_comp = bf->bf_comp; /* NOTE: DMA segments will be setup by the setup/chain functions */ /* The caller has to re-init the descriptor + links */ /* * Free the DMA mapping here, before we NULL the mbuf. * We must only call bus_dmamap_unload() once per mbuf chain * or behaviour is undefined. */ if (bf->bf_m != NULL) { /* * XXX is this POSTWRITE call required? */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); } bf->bf_m = NULL; bf->bf_node = NULL; /* Copy state */ memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); return tbf; } struct ath_buf * ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) { struct ath_buf *bf; ATH_TXBUF_LOCK(sc); bf = _ath_getbuf_locked(sc, btype); /* * If a mgmt buffer was requested but we're out of those, * try requesting a normal one. */ if (bf == NULL && btype == ATH_BUFTYPE_MGMT) bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); ATH_TXBUF_UNLOCK(sc); if (bf == NULL) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); sc->sc_stats.ast_tx_qstop++; } return bf; } /* * Transmit a single frame. * * net80211 will free the node reference if the transmit * fails, so don't free the node reference here. */ static int ath_transmit(struct ieee80211com *ic, struct mbuf *m) { struct ath_softc *sc = ic->ic_softc; struct ieee80211_node *ni; struct mbuf *next; struct ath_buf *bf; ath_bufhead frags; int retval = 0; /* * Tell the reset path that we're currently transmitting. */ ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt > 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: sc_inreset_cnt > 0; bailing\n", __func__); ATH_PCU_UNLOCK(sc); sc->sc_stats.ast_tx_qstop++; ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish"); return (ENOBUFS); /* XXX should be EINVAL or? */ } sc->sc_txstart_cnt++; ATH_PCU_UNLOCK(sc); /* Wake the hardware up already */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start"); /* * Grab the TX lock - it's ok to do this here; we haven't * yet started transmitting. */ ATH_TX_LOCK(sc); /* * Node reference, if there's one. */ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; /* * Enforce how deep a node queue can get. * * XXX it would be nicer if we kept an mbuf queue per * node and only whacked them into ath_bufs when we * are ready to schedule some traffic from them. * .. that may come later. * * XXX we should also track the per-node hardware queue * depth so it is easy to limit the _SUM_ of the swq and * hwq frames. Since we only schedule two HWQ frames * at a time, this should be OK for now. */ if ((!(m->m_flags & M_EAPOL)) && (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) { sc->sc_stats.ast_tx_nodeq_overflow++; retval = ENOBUFS; goto finish; } /* * Check how many TX buffers are available. * * If this is for non-EAPOL traffic, just leave some * space free in order for buffer cloning and raw * frame transmission to occur. * * If it's for EAPOL traffic, ignore this for now. * Management traffic will be sent via the raw transmit * method which bypasses this check. 
* * This is needed to ensure that EAPOL frames during * (re) keying have a chance to go out. * * See kern/138379 for more information. */ if ((!(m->m_flags & M_EAPOL)) && (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) { sc->sc_stats.ast_tx_nobuf++; retval = ENOBUFS; goto finish; } /* * Grab a TX buffer and associated resources. * * If it's an EAPOL frame, allocate a MGMT ath_buf. * That way even temporary buffer exhaustion in the data path * doesn't leave us without the ability to transmit management * frames. * * Otherwise allocate a normal buffer. */ if (m->m_flags & M_EAPOL) bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); else bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); if (bf == NULL) { /* * If we failed to allocate a buffer, fail. * * We shouldn't fail normally, due to the check * above. */ sc->sc_stats.ast_tx_nobuf++; retval = ENOBUFS; goto finish; } /* * At this point we have a buffer; so we need to free it * if we hit any error conditions. */ /* * Check for fragmentation. If this frame * has been broken up verify we have enough * buffers to send all the fragments so all * go out or none... */ TAILQ_INIT(&frags); if ((m->m_flags & M_FRAG) && !ath_txfrag_setup(sc, &frags, m, ni)) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of txfrag buffers\n", __func__); sc->sc_stats.ast_tx_nofrag++; if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); /* * XXXGL: is mbuf valid after ath_txfrag_setup? If yes, * we shouldn't free it but return back. */ ieee80211_free_mbuf(m); m = NULL; goto bad; } /* * At this point if we have any TX fragments, then we will * have bumped the node reference once for each of those. */ /* * XXX Is there anything actually _enforcing_ that the * fragments are being transmitted in one hit, rather than * being interleaved with other transmissions on that * hardware queue? * * The ATH TX output lock is the only thing serialising this * right now. */ /* * Calculate the "next fragment" length field in ath_buf * in order to let the transmit path know enough about * what to next write to the hardware. */ if (m->m_flags & M_FRAG) { struct ath_buf *fbf = bf; struct ath_buf *n_fbf = NULL; struct mbuf *fm = m->m_nextpkt; /* * We need to walk the list of fragments and set * the next size to that of the following buffer. * However, the first buffer isn't in the frag * list, so we have to do some gymnastics here. */ TAILQ_FOREACH(n_fbf, &frags, bf_list) { fbf->bf_nextfraglen = fm->m_pkthdr.len; fbf = n_fbf; fm = fm->m_nextpkt; } } nextfrag: /* * Pass the frame to the h/w for transmission. * Fragmented frames have each frag chained together * with m_nextpkt. We know there are sufficient ath_buf's * to send all the frags because of work done by * ath_txfrag_setup. We leave m_nextpkt set while * calling ath_tx_start so it can use it to extend the * tx duration to cover the subsequent frag and * so it can reclaim all the mbufs in case of an error; * ath_tx_start clears m_nextpkt once it commits to * handing the frame to the hardware. * * Note: if this fails, then the mbufs are freed but * not the node reference. * * So, we now have to free the node reference ourselves here * and return OK up to the stack. */ next = m->m_nextpkt; if (ath_tx_start(sc, ni, bf, m)) { bad: if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); reclaim: bf->bf_m = NULL; bf->bf_node = NULL; ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, bf); /* * Free the rest of the node references and * buffers for the fragment list.
*/ ath_txfrag_cleanup(sc, &frags, ni); ATH_TXBUF_UNLOCK(sc); /* * XXX: And free the node/return OK; ath_tx_start() may have * modified the buffer. We currently have no way to * signify that the mbuf was freed but there was an error. */ ieee80211_free_node(ni); retval = 0; goto finish; } /* * Check here if the node is in power save state. */ ath_tx_update_tim(sc, ni, 1); if (next != NULL) { /* * Beware of state changing between frags. * XXX check sta power-save state? */ if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: flush fragmented packet, state %s\n", __func__, ieee80211_state_name[ni->ni_vap->iv_state]); /* XXX dmamap */ ieee80211_free_mbuf(next); goto reclaim; } m = next; bf = TAILQ_FIRST(&frags); KASSERT(bf != NULL, ("no buf for txfrag")); TAILQ_REMOVE(&frags, bf, bf_list); goto nextfrag; } /* * Bump watchdog timer. */ sc->sc_wd_timer = 5; finish: ATH_TX_UNLOCK(sc); /* * Finished transmitting! */ ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Sleep the hardware if required */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished"); return (retval); } static int ath_media_change(struct ifnet *ifp) { int error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } /* * Block/unblock tx+rx processing while a key change is done. * We assume the caller serializes key management operations * so we only need to worry about synchronization with other * uses that originate in the driver. */ static void ath_key_update_begin(struct ieee80211vap *vap) { struct ath_softc *sc = vap->iv_ic->ic_softc; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); taskqueue_block(sc->sc_tq); } static void ath_key_update_end(struct ieee80211vap *vap) { struct ath_softc *sc = vap->iv_ic->ic_softc; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); taskqueue_unblock(sc->sc_tq); } static void ath_update_promisc(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; u_int32_t rfilt; /* configure rx filter */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(sc->sc_ah, rfilt); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); } /* * Driver-internal mcast update call. * * Assumes the hardware is already awake. */ static void ath_update_mcast_hw(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; u_int32_t mfilt[2]; /* calculate and install multicast filter */ if (ic->ic_allmulti == 0) { struct ieee80211vap *vap; struct ifnet *ifp; struct ifmultiaddr *ifma; /* * Merge multicast addresses to form the hardware filter. 
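* Each address is folded into a 6-bit index into the 64-bit mfilt * bitmap: pos = (fold(le32dec(mac + 0)) ^ fold(le32dec(mac + 3))) & 0x3f, * where fold(v) = (v >> 18) ^ (v >> 12) ^ (v >> 6) ^ v, ie the XOR * of eight 6-bit fields from two overlapping 32-bit loads.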
*/ mfilt[0] = mfilt[1] = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { ifp = vap->iv_ifp; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { caddr_t dl; uint32_t val; uint8_t pos; /* calculate XOR of eight 6bit values */ dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); val = le32dec(dl + 0); pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; val = le32dec(dl + 3); pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; pos &= 0x3f; mfilt[pos / 32] |= (1 << (pos % 32)); } if_maddr_runlock(ifp); } } else mfilt[0] = mfilt[1] = ~0; ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", __func__, mfilt[0], mfilt[1]); } /* * Called from the net80211 layer - force the hardware * awake before operating. */ static void ath_update_mcast(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ath_update_mcast_hw(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } void ath_mode_init(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; /* configure rx filter */ rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(ah, rfilt); /* configure operational mode */ ath_hal_setopmode(ah); /* handle any link-level address change */ ath_hal_setmac(ah, ic->ic_macaddr); /* calculate and install multicast filter */ ath_update_mcast_hw(sc); } /* * Set the slot time based on the current setting. */ void ath_setslottime(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; u_int usec; if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) usec = 13; else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) usec = 21; else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { /* honor short/long slot time only in 11g */ /* XXX shouldn't honor on pure g or turbo g channel */ if (ic->ic_flags & IEEE80211_F_SHSLOT) usec = HAL_SLOT_TIME_9; else usec = HAL_SLOT_TIME_20; } else usec = HAL_SLOT_TIME_9; DPRINTF(sc, ATH_DEBUG_RESET, "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); /* Wake up the hardware first before updating the slot time */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_hal_setslottime(ah, usec); ath_power_restore_power_state(sc); sc->sc_updateslot = OK; ATH_UNLOCK(sc); } /* * Callback from the 802.11 layer to update the * slot time based on the current setting. */ static void ath_updateslot(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; /* * When not coordinating the BSS, change the hardware * immediately. For other operation we defer the change * until beacon updates have propagated to the stations. * * XXX sc_updateslot isn't changed behind a lock? */ if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) sc->sc_updateslot = UPDATE; else ath_setslottime(sc); } /* * Append the contents of src to dst; both queues * are assumed to be locked. */ void ath_txqmove(struct ath_txq *dst, struct ath_txq *src) { ATH_TXQ_LOCK_ASSERT(src); ATH_TXQ_LOCK_ASSERT(dst); TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); dst->axq_link = src->axq_link; src->axq_link = NULL; dst->axq_depth += src->axq_depth; dst->axq_aggr_depth += src->axq_aggr_depth; src->axq_depth = 0; src->axq_aggr_depth = 0; } /* * Reset the hardware, with no loss. * * This can't be used for a general case reset. 
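* (It passes ATH_RESET_NOLOSS to ath_reset(), which processes rather * than drops the pending TX queue contents; callers needing a full * drain have to request ATH_RESET_FULL through the normal reset path.)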
*/ static void ath_reset_proc(void *arg, int pending) { struct ath_softc *sc = arg; #if 0 device_printf(sc->sc_dev, "%s: resetting\n", __func__); #endif ath_reset(sc, ATH_RESET_NOLOSS); } /* * Reset the hardware after detecting beacons have stopped. */ static void ath_bstuck_proc(void *arg, int pending) { struct ath_softc *sc = arg; uint32_t hangs = 0; if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) device_printf(sc->sc_dev, "bb hang detected (0x%x)\n", hangs); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON)) if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL); #endif device_printf(sc->sc_dev, "stuck beacon; resetting (bmiss count %u)\n", sc->sc_bmisscount); sc->sc_stats.ast_bstuck++; /* * This assumes that there's no simultaneous channel mode change * occurring. */ ath_reset(sc, ATH_RESET_NOLOSS); } static int ath_desc_alloc(struct ath_softc *sc) { int error; error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER); if (error != 0) { return error; } sc->sc_txbuf_cnt = ath_txbuf; error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, ATH_TXDESC); if (error != 0) { ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); return error; } /* * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the * flag doesn't have to be set in ath_getbuf_locked(). */ error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); if (error != 0) { ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt); return error; } return 0; } static void ath_desc_free(struct ath_softc *sc) { if (sc->sc_bdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); if (sc->sc_txdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); if (sc->sc_txdma_mgmt.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt); } static struct ieee80211_node * ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; struct ath_node *an; an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); if (an == NULL) { /* XXX stat+msg */ return NULL; } ath_rate_node_init(sc, an); /* Setup the mutex - there's no associd yet so set the name to NULL */ snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", device_get_nameunit(sc->sc_dev), an); mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); /* XXX setup ath_tid */ ath_tx_tid_init(sc, an); DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an); return &an->an_node; } static void ath_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, ni->ni_macaddr, ":", ATH_NODE(ni)); /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ ath_tx_node_flush(sc, ATH_NODE(ni)); ath_rate_node_cleanup(sc, ATH_NODE(ni)); sc->sc_node_cleanup(ni); } static void ath_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, ni->ni_macaddr, ":", ATH_NODE(ni)); mtx_destroy(&ATH_NODE(ni)->an_mtx); sc->sc_node_free(ni); } static void ath_node_getsignal(const struct ieee80211_node *ni, 
int8_t *rssi, int8_t *noise) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; *rssi = ic->ic_node_getrssi(ni); if (ni->ni_chan != IEEE80211_CHAN_ANYC) *noise = ath_hal_getchannoise(ah, ni->ni_chan); else *noise = -95; /* nominally correct */ } /* * Set the default antenna. */ void ath_setdefantenna(struct ath_softc *sc, u_int antenna) { struct ath_hal *ah = sc->sc_ah; /* XXX block beacon interrupts */ ath_hal_setdefantenna(ah, antenna); if (sc->sc_defant != antenna) sc->sc_stats.ast_ant_defswitch++; sc->sc_defant = antenna; sc->sc_rxotherant = 0; } static void ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) { txq->axq_qnum = qnum; txq->axq_ac = 0; txq->axq_depth = 0; txq->axq_aggr_depth = 0; txq->axq_intrcnt = 0; txq->axq_link = NULL; txq->axq_softc = sc; TAILQ_INIT(&txq->axq_q); TAILQ_INIT(&txq->axq_tidq); TAILQ_INIT(&txq->fifo.axq_q); ATH_TXQ_LOCK_INIT(sc, txq); } /* * Setup a h/w transmit queue. */ static struct ath_txq * ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) { struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; int qnum; memset(&qi, 0, sizeof(qi)); qi.tqi_subtype = subtype; qi.tqi_aifs = HAL_TXQ_USEDEFAULT; qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; /* * Enable interrupts only for EOL and DESC conditions. * We mark tx descriptors to receive a DESC interrupt * when a tx queue gets deep; otherwise waiting for the * EOL to reap descriptors. Note that this is done to * reduce interrupt load and this only defers reaping * descriptors, never transmitting frames. Aside from * reducing interrupts this also permits more concurrency. * The only potential downside is if the tx queue backs * up in which case the top half of the kernel may back up * due to a lack of tx descriptors. */ if (sc->sc_isedma) qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXOKINT_ENABLE; else qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; qnum = ath_hal_setuptxqueue(ah, qtype, &qi); if (qnum == -1) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return NULL; } if (qnum >= nitems(sc->sc_txq)) { device_printf(sc->sc_dev, "hal qnum %u out of range, max %zu!\n", qnum, nitems(sc->sc_txq)); ath_hal_releasetxqueue(ah, qnum); return NULL; } if (!ATH_TXQ_SETUP(sc, qnum)) { ath_txq_init(sc, &sc->sc_txq[qnum], qnum); sc->sc_txqsetup |= 1<<qnum; } return &sc->sc_txq[qnum]; } /* * Setup a hardware data transmit queue for the specified * access control. The hal may not support all requested * queues in which case it will return a reference to a * previously setup queue. We record the mapping from ac's * to h/w queues for use by ath_tx_start and also track * the set of h/w queues being used to optimize work in the * transmit interrupt handler and related routines. */ static int ath_tx_setup(struct ath_softc *sc, int ac, int haltype) { struct ath_txq *txq; if (ac >= nitems(sc->sc_ac2q)) { device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", ac, nitems(sc->sc_ac2q)); return 0; } txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); if (txq != NULL) { txq->axq_ac = ac; sc->sc_ac2q[ac] = txq; return 1; } else return 0; } /* * Update WME parameters for a transmit queue.
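* (net80211 supplies CWmin/CWmax as exponents; ATH_EXPONENT_TO_VALUE() * below converts exponent v into the (2^v)-1 window value that the * hardware registers expect.)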
*/ static int ath_txq_update(struct ath_softc *sc, int ac) { #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) struct ieee80211com *ic = &sc->sc_ic; struct ath_txq *txq = sc->sc_ac2q[ac]; struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) { /* * AIFS is zero so there's no pre-transmit wait. The * burst time defines the slot duration and is configured * through net80211. The QCU is setup to not do post-xmit * back off, lock out all lower-priority QCU's, and fire * off the DMA beacon alert timer which is setup based * on the slot configuration. */ qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE | HAL_TXQ_TXERRINT_ENABLE | HAL_TXQ_TXURNINT_ENABLE | HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_DBA_GATED | HAL_TXQ_BACKOFF_DISABLE | HAL_TXQ_ARB_LOCKOUT_GLOBAL ; qi.tqi_aifs = 0; /* XXX +dbaprep? */ qi.tqi_readyTime = sc->sc_tdmaslotlen; qi.tqi_burstTime = qi.tqi_readyTime; } else { #endif /* * XXX shouldn't this just use the default flags * used in the previous queue setup? */ qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE | HAL_TXQ_TXERRINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE | HAL_TXQ_TXURNINT_ENABLE | HAL_TXQ_TXEOLINT_ENABLE ; qi.tqi_aifs = wmep->wmep_aifsn; qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); qi.tqi_readyTime = 0; qi.tqi_burstTime = IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit); #ifdef IEEE80211_SUPPORT_TDMA } #endif DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", __func__, txq->axq_qnum, qi.tqi_qflags, qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { device_printf(sc->sc_dev, "unable to update hardware queue " "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); return 0; } else { ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ return 1; } #undef ATH_EXPONENT_TO_VALUE } /* * Callback from the 802.11 layer to update WME parameters. */ int ath_wme_update(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; return !ath_txq_update(sc, WME_AC_BE) || !ath_txq_update(sc, WME_AC_BK) || !ath_txq_update(sc, WME_AC_VI) || !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; } /* * Reclaim resources for a setup queue. */ static void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) { ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); sc->sc_txqsetup &= ~(1<<txq->axq_qnum); ATH_TXQ_LOCK_DESTROY(txq); } /* * Reclaim all tx queue resources. */ static void ath_tx_cleanup(struct ath_softc *sc) { int i; ATH_TXBUF_LOCK_DESTROY(sc); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_cleanupq(sc, &sc->sc_txq[i]); } /* * Return h/w rate index for an IEEE rate (w/o basic rate bit) * using the current rates in sc_rixmap. */ int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) { int rix = sc->sc_rixmap[rate]; /* NB: return lowest rix for invalid rate */ return (rix == 0xff ?
0 : rix); } static void ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ieee80211com *ic = &sc->sc_ic; int sr, lr, pri; if (ts->ts_status == 0) { u_int8_t txant = ts->ts_antenna; sc->sc_stats.ast_ant_tx[txant]++; sc->sc_ant_tx[txant]++; if (ts->ts_finaltsi != 0) sc->sc_stats.ast_tx_altrate++; pri = M_WME_GETAC(bf->bf_m); if (pri >= WME_AC_VO) ic->ic_wme.wme_hipri_traffic++; if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ni->ni_inact = ni->ni_inact_reload; } else { if (ts->ts_status & HAL_TXERR_XRETRY) sc->sc_stats.ast_tx_xretries++; if (ts->ts_status & HAL_TXERR_FIFO) sc->sc_stats.ast_tx_fifoerr++; if (ts->ts_status & HAL_TXERR_FILT) sc->sc_stats.ast_tx_filtered++; if (ts->ts_status & HAL_TXERR_XTXOP) sc->sc_stats.ast_tx_xtxop++; if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) sc->sc_stats.ast_tx_timerexpired++; if (bf->bf_m->m_flags & M_FF) sc->sc_stats.ast_ff_txerr++; } /* XXX when is this valid? */ if (ts->ts_flags & HAL_TX_DESC_CFG_ERR) sc->sc_stats.ast_tx_desccfgerr++; /* * This can be valid for successful frame transmission! * If there's a TX FIFO underrun during aggregate transmission, * the MAC will pad the rest of the aggregate with delimiters. * If a BA is returned, the frame is marked as "OK" and it's up * to the TX completion code to notice which frames weren't * successfully transmitted. */ if (ts->ts_flags & HAL_TX_DATA_UNDERRUN) sc->sc_stats.ast_tx_data_underrun++; if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN) sc->sc_stats.ast_tx_delim_underrun++; sr = ts->ts_shortretry; lr = ts->ts_longretry; sc->sc_stats.ast_tx_shortretry += sr; sc->sc_stats.ast_tx_longretry += lr; } /* * The default completion. If fail is 1, this means * "please don't retry the frame, and just return -1 status * to the net80211 stack". */ void ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ath_tx_status *ts = &bf->bf_status.ds_txstat; int st; if (fail == 1) st = -1; else st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? ts->ts_status : HAL_TXERR_XRETRY; #if 0 if (bf->bf_state.bfs_dobaw) device_printf(sc->sc_dev, "%s: bf %p: seqno %d: dobaw should've been cleared!\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno)); #endif if (bf->bf_next != NULL) device_printf(sc->sc_dev, "%s: bf %p: seqno %d: bf_next not NULL!\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno)); /* * Check if the node software queue is empty; if so * then clear the TIM. * * This needs to be done before the buffer is freed as * otherwise the node reference will have been released * and the node may not actually exist any longer. * * XXX I don't like this belonging here, but it's cleaner * to do it here right now than all the other places * where ath_tx_default_comp() is called. * * XXX TODO: during drain, ensure that the callback is * being called so we get a chance to update the TIM. */ if (bf->bf_node) { ATH_TX_LOCK(sc); ath_tx_update_tim(sc, bf->bf_node, 0); ATH_TX_UNLOCK(sc); } /* * Do any tx complete callback. Note this must * be done before releasing the node reference. * This will free the mbuf, release the net80211 * node and recycle the ath_buf. */ ath_tx_freebuf(sc, bf, st); } /* * Update rate control with the given completion status.
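* For aggregates, nframes is the number of subframes and nbad is how * many of those missed the block-ack; non-aggregate completions pass * nframes = 1 and an nbad of 0 or 1 derived from the status code.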
*/ void ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, int nframes, int nbad) { struct ath_node *an; /* Only for unicast frames */ if (ni == NULL) return; an = ATH_NODE(ni); ATH_NODE_UNLOCK_ASSERT(an); if ((ts->ts_status & HAL_TXERR_FILT) == 0) { ATH_NODE_LOCK(an); ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); ATH_NODE_UNLOCK(an); } } /* * Process the completion of the given buffer. * * This calls the rate control update and then the buffer completion. * This will either free the buffer or requeue it. In any case, the * bf pointer should be treated as invalid after this function is called. */ void ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; ATH_TX_UNLOCK_ASSERT(sc); ATH_TXQ_UNLOCK_ASSERT(txq); /* If unicast frame, update general statistics */ if (ni != NULL) { /* update statistics */ ath_tx_update_stats(sc, ts, bf); } /* * Call the completion handler. * The completion handler is responsible for * calling the rate control code. * * Frames with no completion handler get the * rate control code called here. */ if (bf->bf_comp == NULL) { if ((ts->ts_status & HAL_TXERR_FILT) == 0 && (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { /* * XXX assume this isn't an aggregate * frame. */ ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, ts, bf->bf_state.bfs_pktlen, 1, (ts->ts_status == 0 ? 0 : 1)); } ath_tx_default_comp(sc, bf, 0); } else bf->bf_comp(sc, bf, 0); } /* * Process completed xmit descriptors from the specified queue. * Kick the packet scheduler if needed. This can occur from this * particular task. */ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; struct ath_desc *ds; struct ath_tx_status *ts; struct ieee80211_node *ni; #ifdef IEEE80211_SUPPORT_SUPERG struct ieee80211com *ic = &sc->sc_ic; #endif /* IEEE80211_SUPPORT_SUPERG */ int nacked; HAL_STATUS status; DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", __func__, txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_link); ATH_KTR(sc, ATH_KTR_TXCOMP, 4, "ath_tx_processq: txq=%u head %p link %p depth %p", txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_link, txq->axq_depth); nacked = 0; for (;;) { ATH_TXQ_LOCK(txq); txq->axq_intrcnt = 0; /* reset periodic desc intr count */ bf = TAILQ_FIRST(&txq->axq_q); if (bf == NULL) { ATH_TXQ_UNLOCK(txq); break; } ds = bf->bf_lastds; /* XXX must be setup correctly! */ ts = &bf->bf_status.ds_txstat; status = ath_hal_txprocdesc(ah, ds, ts); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) ath_printtxbuf(sc, bf, txq->axq_qnum, 0, status == HAL_OK); else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) ath_printtxbuf(sc, bf, txq->axq_qnum, 0, status == HAL_OK); #endif #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS)) { if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, sc->sc_tx_statuslen, (char *) ds); } #endif if (status == HAL_EINPROGRESS) { ATH_KTR(sc, ATH_KTR_TXCOMP, 3, "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", txq->axq_qnum, bf, ds); ATH_TXQ_UNLOCK(txq); break; } ATH_TXQ_REMOVE(txq, bf, bf_list); /* * Sanity check. 
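* A mismatch between the queue we're processing and the queue * recorded in bfs_tx_queue would send the holding-buffer updates * below to the wrong hardware queue, so it is flagged loudly here.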
*/ if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) { device_printf(sc->sc_dev, "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n", __func__, txq->axq_qnum, bf, bf->bf_state.bfs_tx_queue); } if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) { device_printf(sc->sc_dev, "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n", __func__, txq->axq_qnum, bf->bf_last, bf->bf_last->bf_state.bfs_tx_queue); } #if 0 if (txq->axq_depth > 0) { /* * More frames follow. Mark the buffer busy * so it's not re-used while the hardware may * still re-read the link field in the descriptor. * * Use the last buffer in an aggregate as that * is where the hardware may be - intermediate * descriptors won't be "busy". */ bf->bf_last->bf_flags |= ATH_BUF_BUSY; } else txq->axq_link = NULL; #else bf->bf_last->bf_flags |= ATH_BUF_BUSY; #endif if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth--; ni = bf->bf_node; ATH_KTR(sc, ATH_KTR_TXCOMP, 5, "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", txq->axq_qnum, bf, ds, ni, ts->ts_status); /* * If unicast frame was ack'd update RSSI, * including the last rx time used to * workaround phantom bmiss interrupts. */ if (ni != NULL && ts->ts_status == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { nacked++; sc->sc_stats.ast_tx_rssi = ts->ts_rssi; ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, ts->ts_rssi); } ATH_TXQ_UNLOCK(txq); /* * Update statistics and call completion */ ath_tx_process_buf_completion(sc, txq, ts, bf); /* XXX at this point, bf and ni may be totally invalid */ } #ifdef IEEE80211_SUPPORT_SUPERG /* * Flush fast-frame staging queue when traffic slows. */ if (txq->axq_depth <= 1) ieee80211_ff_flush(ic, txq->axq_ac); #endif /* Kick the software TXQ scheduler */ if (dosched) { ATH_TX_LOCK(sc); ath_txq_sched(sc, txq); ATH_TX_UNLOCK(sc); } ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_processq: txq=%u: done", txq->axq_qnum); return nacked; } #define TXQACTIVE(t, q) ( (t) & (1 << (q))) /* * Deferred processing of transmit interrupt; special-cased * for a single hardware transmit queue (e.g. 5210 and 5211). */ static void ath_tx_proc_q0(void *arg, int npending) { struct ath_softc *sc = arg; uint32_t txqs; ATH_PCU_LOCK(sc); sc->sc_txproc_cnt++; txqs = sc->sc_txq_active; sc->sc_txq_active &= ~txqs; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc_q0: txqs=0x%08x", txqs); if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) /* XXX why is lastrx updated in tx code? */ sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) ath_tx_processq(sc, sc->sc_cabq, 1); sc->sc_wd_timer = 0; if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ath_tx_kick(sc); } /* * Deferred processing of transmit interrupt; special-cased * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). */ static void ath_tx_proc_q0123(void *arg, int npending) { struct ath_softc *sc = arg; int nacked; uint32_t txqs; ATH_PCU_LOCK(sc); sc->sc_txproc_cnt++; txqs = sc->sc_txq_active; sc->sc_txq_active &= ~txqs; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc_q0123: txqs=0x%08x", txqs); /* * Process each active queue. 
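* (Presumably queues 0-3 carry the four WME data access categories * on these parts; the CAB queue is checked separately below.)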
*/ nacked = 0; if (TXQACTIVE(txqs, 0)) nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); if (TXQACTIVE(txqs, 1)) nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); if (TXQACTIVE(txqs, 2)) nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); if (TXQACTIVE(txqs, 3)) nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) ath_tx_processq(sc, sc->sc_cabq, 1); if (nacked) sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); sc->sc_wd_timer = 0; if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ath_tx_kick(sc); } /* * Deferred processing of transmit interrupt. */ static void ath_tx_proc(void *arg, int npending) { struct ath_softc *sc = arg; int i, nacked; uint32_t txqs; ATH_PCU_LOCK(sc); sc->sc_txproc_cnt++; txqs = sc->sc_txq_active; sc->sc_txq_active &= ~txqs; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); /* * Process each active queue. */ nacked = 0; for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); if (nacked) sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); sc->sc_wd_timer = 0; if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ath_tx_kick(sc); } #undef TXQACTIVE /* * Deferred processing of TXQ rescheduling. */ static void ath_txq_sched_tasklet(void *arg, int npending) { struct ath_softc *sc = arg; int i; /* XXX is skipping ok? */ ATH_PCU_LOCK(sc); #if 0 if (sc->sc_inreset_cnt > 0) { device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n", __func__); ATH_PCU_UNLOCK(sc); return; } #endif sc->sc_txproc_cnt++; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_TX_LOCK(sc); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { ath_txq_sched(sc, &sc->sc_txq[i]); } } ATH_TX_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); } void ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) { ATH_TXBUF_LOCK_ASSERT(sc); if (bf->bf_flags & ATH_BUF_MGMT) TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); else { TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); sc->sc_txbuf_cnt++; if (sc->sc_txbuf_cnt > ath_txbuf) { device_printf(sc->sc_dev, "%s: sc_txbuf_cnt > %d?\n", __func__, ath_txbuf); sc->sc_txbuf_cnt = ath_txbuf; } } } void ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) { ATH_TXBUF_LOCK_ASSERT(sc); if (bf->bf_flags & ATH_BUF_MGMT) TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); else { TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); sc->sc_txbuf_cnt++; if (sc->sc_txbuf_cnt > ATH_TXBUF) { device_printf(sc->sc_dev, "%s: sc_txbuf_cnt > %d?\n", __func__, ATH_TXBUF); sc->sc_txbuf_cnt = ATH_TXBUF; } } } /* * Free the holding buffer if it exists */ void ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq) { ATH_TXBUF_UNLOCK_ASSERT(sc); ATH_TXQ_LOCK_ASSERT(txq); if (txq->axq_holdingbf == NULL) return; txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY; ATH_TXBUF_LOCK(sc); ath_returnbuf_tail(sc, txq->axq_holdingbf); ATH_TXBUF_UNLOCK(sc); txq->axq_holdingbf = NULL; } /* * Add this buffer to the holding queue, freeing the previous * one if it exists. 
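* * The hardware may still re-read the link pointer in the most * recently completed descriptor, so the buffer is parked here * rather than recycled, until the next completion displaces it.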
*/ static void ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf) { struct ath_txq *txq; txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; ATH_TXBUF_UNLOCK_ASSERT(sc); ATH_TXQ_LOCK_ASSERT(txq); /* XXX assert ATH_BUF_BUSY is set */ /* XXX assert the tx queue is under the max number */ if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) { device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n", __func__, bf, bf->bf_state.bfs_tx_queue); bf->bf_flags &= ~ATH_BUF_BUSY; ath_returnbuf_tail(sc, bf); return; } ath_txq_freeholdingbuf(sc, txq); txq->axq_holdingbf = bf; } /* * Return a buffer to the pool and update the 'busy' flag on the * previous 'tail' entry. * * This _must_ only be called when the buffer is involved in a completed * TX. The logic is that if it was part of an active TX, the previous * buffer on the list is now not involved in a halted TX DMA queue, waiting * for restart (eg for TDMA.) * * The caller must free the mbuf and recycle the node reference. * * XXX This method of handling busy / holding buffers is insanely stupid. * It requires bf_state.bfs_tx_queue to be correctly assigned. It would * be much nicer if buffers in the processq() methods would instead be * always completed there (pushed onto a txq or ath_bufhead) so we knew * exactly what hardware queue they came from in the first place. */ void ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) { struct ath_txq *txq; txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); /* * If this buffer is busy, push it onto the holding queue. */ if (bf->bf_flags & ATH_BUF_BUSY) { ATH_TXQ_LOCK(txq); ath_txq_addholdingbuf(sc, bf); ATH_TXQ_UNLOCK(txq); return; } /* * Not a busy buffer, so free normally */ ATH_TXBUF_LOCK(sc); ath_returnbuf_tail(sc, bf); ATH_TXBUF_UNLOCK(sc); } /* * This is currently used by ath_tx_draintxq() and * ath_tx_tid_free_pkts(). * * It recycles a single ath_buf. */ void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) { struct ieee80211_node *ni = bf->bf_node; struct mbuf *m0 = bf->bf_m; /* * Make sure that we only sync/unload if there's an mbuf. * If not (eg we cloned a buffer), the unload will have already * occurred. */ if (bf->bf_m != NULL) { bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); } bf->bf_node = NULL; bf->bf_m = NULL; /* Free the buffer, it's not needed any longer */ ath_freebuf(sc, bf); /* Pass the buffer back to net80211 - completing it */ ieee80211_tx_complete(ni, m0, status); } static struct ath_buf * ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) { struct ath_buf *bf; ATH_TXQ_LOCK_ASSERT(txq); /* * Drain the FIFO queue first, then if it's * empty, move to the normal frame queue. */ bf = TAILQ_FIRST(&txq->fifo.axq_q); if (bf != NULL) { /* * Is it the last buffer in this set? * Decrement the FIFO counter. */ if (bf->bf_flags & ATH_BUF_FIFOEND) { if (txq->axq_fifo_depth == 0) { device_printf(sc->sc_dev, "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", __func__, txq->axq_qnum, txq->fifo.axq_depth); } else txq->axq_fifo_depth--; } ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); return (bf); } /* * Debugging! */ if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { device_printf(sc->sc_dev, "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", __func__, txq->axq_qnum, txq->axq_fifo_depth, txq->fifo.axq_depth); } /* * Now drain the pending queue. 
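* (These are frames still waiting on the software side of the queue, * in submission order; the FIFO above covers what was already pushed * to the hardware.)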
*/ bf = TAILQ_FIRST(&txq->axq_q); if (bf == NULL) { txq->axq_link = NULL; return (NULL); } ATH_TXQ_REMOVE(txq, bf, bf_list); return (bf); } void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) { #ifdef ATH_DEBUG struct ath_hal *ah = sc->sc_ah; #endif struct ath_buf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block ath_tx_proc */ for (ix = 0;; ix++) { ATH_TXQ_LOCK(txq); bf = ath_tx_draintxq_get_one(sc, txq); if (bf == NULL) { ATH_TXQ_UNLOCK(txq); break; } if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth--; #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) { struct ieee80211com *ic = &sc->sc_ic; int status = 0; /* * EDMA operation has a TX completion FIFO * separate from the TX descriptor, so this * method of checking the "completion" status * is wrong. */ if (! sc->sc_isedma) { status = (ath_hal_txprocdesc(ah, bf->bf_lastds, &bf->bf_status.ds_txstat) == HAL_OK); } ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 0, -1); } #endif /* ATH_DEBUG */ /* * Since we're now doing magic in the completion * functions, we -must- call it for aggregation * destinations or BAW tracking will get upset. */ /* * Clear ATH_BUF_BUSY; the completion handler * will free the buffer. */ ATH_TXQ_UNLOCK(txq); bf->bf_flags &= ~ATH_BUF_BUSY; if (bf->bf_comp) bf->bf_comp(sc, bf, 1); else ath_tx_default_comp(sc, bf, 1); } /* * Free the holding buffer if it exists */ ATH_TXQ_LOCK(txq); ath_txq_freeholdingbuf(sc, txq); ATH_TXQ_UNLOCK(txq); /* * Drain software queued frames which are on * active TIDs. */ ath_tx_txq_drain(sc, txq); } static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hal *ah = sc->sc_ah; ATH_TXQ_LOCK_ASSERT(txq); DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " "link %p, holdingbf=%p\n", __func__, txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), (int) ath_hal_numtxpending(ah, txq->axq_qnum), txq->axq_flags, txq->axq_link, txq->axq_holdingbf); (void) ath_hal_stoptxdma(ah, txq->axq_qnum); /* We've stopped TX DMA, so mark this as stopped. */ txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; #ifdef ATH_DEBUG if ((sc->sc_debug & ATH_DEBUG_RESET) && (txq->axq_holdingbf != NULL)) { ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); } #endif } int ath_stoptxdma(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; int i; /* XXX return value */ if (sc->sc_invalid) return 0; if (!sc->sc_invalid) { /* don't touch the hardware if marked invalid */ DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", __func__, sc->sc_bhalq, (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), NULL); /* stop the beacon queue */ (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); /* Stop the data queues */ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { ATH_TXQ_LOCK(&sc->sc_txq[i]); ath_tx_stopdma(sc, &sc->sc_txq[i]); ATH_TXQ_UNLOCK(&sc->sc_txq[i]); } } } return 1; } #ifdef ATH_DEBUG void ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; int i = 0; if (! 
(sc->sc_debug & ATH_DEBUG_RESET)) return; device_printf(sc->sc_dev, "%s: Q%d: begin\n", __func__, txq->axq_qnum); TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { ath_printtxbuf(sc, bf, txq->axq_qnum, i, ath_hal_txprocdesc(ah, bf->bf_lastds, &bf->bf_status.ds_txstat) == HAL_OK); i++; } device_printf(sc->sc_dev, "%s: Q%d: end\n", __func__, txq->axq_qnum); } #endif /* ATH_DEBUG */ /* * Drain the transmit queues and reclaim resources. */ void ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf_last; int i; (void) ath_stoptxdma(sc); /* * Dump the queue contents */ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { /* * XXX TODO: should we just handle the completed TX frames * here, whether or not the reset is a full one or not? */ if (ATH_TXQ_SETUP(sc, i)) { #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) ath_tx_dump(sc, &sc->sc_txq[i]); #endif /* ATH_DEBUG */ if (reset_type == ATH_RESET_NOLOSS) { ath_tx_processq(sc, &sc->sc_txq[i], 0); ATH_TXQ_LOCK(&sc->sc_txq[i]); /* * Free the holding buffer; DMA is now * stopped. */ ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); /* * Setup the link pointer to be the * _last_ buffer/descriptor in the list. * If there's nothing in the list, set it * to NULL. */ bf_last = ATH_TXQ_LAST(&sc->sc_txq[i], axq_q_s); if (bf_last != NULL) { ath_hal_gettxdesclinkptr(ah, bf_last->bf_lastds, &sc->sc_txq[i].axq_link); } else { sc->sc_txq[i].axq_link = NULL; } ATH_TXQ_UNLOCK(&sc->sc_txq[i]); } else ath_tx_draintxq(sc, &sc->sc_txq[i]); } } #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) { struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); if (bf != NULL && bf->bf_m != NULL) { ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, ath_hal_txprocdesc(ah, bf->bf_lastds, &bf->bf_status.ds_txstat) == HAL_OK); ieee80211_dump_pkt(&sc->sc_ic, mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 0, -1); } } #endif /* ATH_DEBUG */ sc->sc_wd_timer = 0; } /* * Update internal state after a channel change. */ static void ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) { enum ieee80211_phymode mode; /* * Change channels and update the h/w rate map * if we're switching; e.g. 11a to 11b/g. */ mode = ieee80211_chan2mode(chan); if (mode != sc->sc_curmode) ath_setcurmode(sc, mode); sc->sc_curchan = chan; } /* * Set/change channels. If the channel is really being changed, * it's done by resetting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * ath_init. */ static int ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; int ret = 0; /* Treat this as an interface reset */ ATH_PCU_UNLOCK_ASSERT(sc); ATH_UNLOCK_ASSERT(sc); /* (Try to) stop TX/RX from occurring */ taskqueue_block(sc->sc_tq); ATH_PCU_LOCK(sc); /* Disable interrupts */ ath_hal_intrset(ah, 0); /* Stop new RX/TX/interrupt completion */ if (ath_reset_grablock(sc, 1) == 0) { device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", __func__); } /* Stop pending RX/TX completion */ ath_txrx_stop_locked(sc); ATH_PCU_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", __func__, ieee80211_chan2ieee(ic, chan), chan->ic_freq, chan->ic_flags); if (chan != sc->sc_curchan) { HAL_STATUS status; /* * To switch channels clear any pending DMA operations; * wait long enough for the RX fifo to drain, reset the * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. 
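* * Note the ordering: frame RX is disabled before the TX queues are * drained, so nothing received mid-switch can trigger new TX against * the outgoing channel.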
*/ #if 0 ath_hal_intrset(ah, 0); /* disable interrupts */ #endif ath_stoprecv(sc, 1); /* turn off frame recv */ /* * First, handle completed TX/RX frames. */ ath_rx_flush(sc); ath_draintxq(sc, ATH_RESET_NOLOSS); /* * Next, flush the non-scheduled frames. */ ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ ath_update_chainmasks(sc, chan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, HAL_RESET_NORMAL, &status)) { device_printf(sc->sc_dev, "%s: unable to reset " "channel %u (%u MHz, flags 0x%x), hal status %u\n", __func__, ieee80211_chan2ieee(ic, chan), chan->ic_freq, chan->ic_flags, status); ret = EIO; goto finish; } sc->sc_diversity = ath_hal_getdiversity(ah); ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, chan); /* Let spectral at it in case spectral is enabled */ ath_spectral_enable(sc, chan); /* * Let bluetooth coexistence at it in case it's needed for this * channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips * that support it. */ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); /* * Re-enable rx framework. */ if (ath_startrecv(sc) != 0) { device_printf(sc->sc_dev, "%s: unable to restart recv logic\n", __func__); ret = EIO; goto finish; } /* * Change channels and update the h/w rate map * if we're switching; e.g. 11a to 11b/g. */ ath_chan_change(sc, chan); /* * Reset clears the beacon timers; reset them * here if needed. */ if (sc->sc_beacons) { /* restart beacons */ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) ath_tdma_config(sc, NULL); else #endif ath_beacon_config(sc, NULL); } /* * Re-enable interrupts. */ #if 0 ath_hal_intrset(ah, sc->sc_imask); #endif } finish: ATH_PCU_LOCK(sc); sc->sc_inreset_cnt--; /* XXX only do this if sc_inreset_cnt == 0? */ ath_hal_intrset(ah, sc->sc_imask); ATH_PCU_UNLOCK(sc); ath_txrx_start(sc); /* XXX ath_start? */ return ret; } /* * Periodically recalibrate the PHY to account * for temperature/environment changes. */ static void ath_calibrate(void *arg) { struct ath_softc *sc = arg; struct ath_hal *ah = sc->sc_ah; struct ieee80211com *ic = &sc->sc_ic; HAL_BOOL longCal, isCalDone = AH_TRUE; HAL_BOOL aniCal, shortCal = AH_FALSE; int nextcal; ATH_LOCK_ASSERT(sc); /* * Force the hardware awake for ANI work. */ ath_power_set_power_state(sc, HAL_PM_AWAKE); /* Skip trying to do this if we're in reset */ if (sc->sc_inreset_cnt) goto restart; if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ goto restart; longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); if (sc->sc_doresetcal) shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); if (aniCal) { sc->sc_stats.ast_ani_cal++; sc->sc_lastani = ticks; ath_hal_ani_poll(ah, sc->sc_curchan); } if (longCal) { sc->sc_stats.ast_per_cal++; sc->sc_lastlongcal = ticks; if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { /* * Rfgain is out of bounds, reset the chip * to load new gain values.
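* The reset itself is deferred to sc_resettask; we just reschedule * the calibration callout one tick out so the calibration state is * rebuilt once that reset has completed.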
*/ DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: rfgain change\n", __func__); sc->sc_stats.ast_per_rfgain++; sc->sc_resetcal = 0; sc->sc_doresetcal = AH_TRUE; taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); ath_power_restore_power_state(sc); return; } /* * If this long cal is after an idle period, then * reset the data collection state so we start fresh. */ if (sc->sc_resetcal) { (void) ath_hal_calreset(ah, sc->sc_curchan); sc->sc_lastcalreset = ticks; sc->sc_lastshortcal = ticks; sc->sc_resetcal = 0; sc->sc_doresetcal = AH_TRUE; } } /* Only call if we're doing a short/long cal, not for ANI calibration */ if (shortCal || longCal) { isCalDone = AH_FALSE; if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { if (longCal) { /* * Calibrate noise floor data again in case of change. */ ath_hal_process_noisefloor(ah); } } else { DPRINTF(sc, ATH_DEBUG_ANY, "%s: calibration of channel %u failed\n", __func__, sc->sc_curchan->ic_freq); sc->sc_stats.ast_per_calfail++; } if (shortCal) sc->sc_lastshortcal = ticks; } if (!isCalDone) { restart: /* * Use a shorter interval to potentially collect multiple * data samples required to complete calibration. Once * we're told the work is done we drop back to a longer * interval between requests. We're more aggressive doing * work when operating as an AP to improve operation right * after startup. */ sc->sc_lastshortcal = ticks; nextcal = ath_shortcalinterval*hz/1000; if (sc->sc_opmode != HAL_M_HOSTAP) nextcal *= 10; sc->sc_doresetcal = AH_TRUE; } else { /* nextcal should be the shortest time for next event */ nextcal = ath_longcalinterval*hz; if (sc->sc_lastcalreset == 0) sc->sc_lastcalreset = sc->sc_lastlongcal; else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) sc->sc_resetcal = 1; /* setup reset next trip */ sc->sc_doresetcal = AH_FALSE; } /* ANI calibration may occur more often than short/long/resetcal */ if (ath_anicalinterval > 0) nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); if (nextcal != 0) { DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", __func__, nextcal, isCalDone ? "" : "!"); callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); } else { DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", __func__); /* NB: don't rearm timer */ } /* * Restore power state now that we're done. */ ath_power_restore_power_state(sc); } static void ath_scan_start(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; /* XXX calibration timer? */ /* XXXGL: is constant ieee80211broadcastaddr a correct choice? 
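* * Scanning clears sc_syncbeacon and points the BSSID at the * broadcast address, so beacons from any AP pass the RX filter.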
*/ ATH_LOCK(sc); sc->sc_scanning = 1; sc->sc_syncbeacon = 0; rfilt = ath_calcrxfilter(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); ath_hal_setrxfilter(ah, rfilt); ath_hal_setassocid(ah, ieee80211broadcastaddr, 0); ATH_PCU_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", __func__, rfilt, ether_sprintf(ieee80211broadcastaddr)); } static void ath_scan_end(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; ATH_LOCK(sc); sc->sc_scanning = 0; rfilt = ath_calcrxfilter(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); ath_hal_setrxfilter(ah, rfilt); ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); ath_hal_process_noisefloor(ah); ATH_PCU_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); } #ifdef ATH_ENABLE_11N /* * For now, just do a channel change. * * Later, we'll go through the hard slog of suspending tx/rx, changing rate * control state and resetting the hardware without dropping frames out * of the queue. * * The unfortunate trouble here is making absolutely sure that the * channel width change has propagated enough so the hardware * absolutely isn't handed bogus frames for its current operating * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and * do occur in parallel, we need to make certain we've blocked * any further ongoing TX (and RX, that can cause raw TX) * before we do this. */ static void ath_update_chw(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); ath_set_channel(ic); } #endif /* ATH_ENABLE_11N */ static void ath_set_channel(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); (void) ath_chan_set(sc, ic->ic_curchan); /* * If we are returning to our bss channel then mark state * so the next recv'd beacon's tsf will be used to sync the * beacon timers. Note that since we only hear beacons in * sta/ibss mode this has no effect in other operating modes. */ ATH_LOCK(sc); if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) sc->sc_syncbeacon = 1; ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } /* * Walk the vap list and check if there are any vaps in RUN state. */ static int ath_isanyrunningvaps(struct ieee80211vap *this) { struct ieee80211com *ic = this->iv_ic; struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { if (vap != this && vap->iv_state >= IEEE80211_S_RUN) return 1; } return 0; } static int ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; struct ath_vap *avp = ATH_VAP(vap); struct ath_hal *ah = sc->sc_ah; struct ieee80211_node *ni = NULL; int i, error, stamode; u_int32_t rfilt; int csa_run_transition = 0; enum ieee80211_state ostate = vap->iv_state; static const HAL_LED_STATE leds[] = { HAL_LED_INIT, /* IEEE80211_S_INIT */ HAL_LED_SCAN, /* IEEE80211_S_SCAN */ HAL_LED_AUTH, /* IEEE80211_S_AUTH */ HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ HAL_LED_RUN, /* IEEE80211_S_CAC */ HAL_LED_RUN, /* IEEE80211_S_RUN */ HAL_LED_RUN, /* IEEE80211_S_CSA */ HAL_LED_RUN, /* IEEE80211_S_SLEEP */ }; DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); /* * net80211 _should_ have the comlock asserted at this point.
* There are some comments around the calls to vap->iv_newstate * which indicate that it (newstate) may end up dropping the * lock. This and the subsequent lock assert check after newstate * are an attempt to catch these and figure out how/why. */ IEEE80211_LOCK_ASSERT(ic); /* Before we touch the hardware - wake it up */ ATH_LOCK(sc); /* * If the NIC is in anything other than SLEEP state, * we need to ensure that self-generated frames are * set for PWRMGT=0. Otherwise we may end up with * strange situations. * * XXX TODO: is this actually the case? :-) */ if (nstate != IEEE80211_S_SLEEP) ath_power_setselfgen(sc, HAL_PM_AWAKE); /* * Now, wake the thing up. */ ath_power_set_power_state(sc, HAL_PM_AWAKE); /* * And stop the calibration callout whilst we have * ATH_LOCK held. */ callout_stop(&sc->sc_cal_ch); ATH_UNLOCK(sc); if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) csa_run_transition = 1; ath_hal_setledstate(ah, leds[nstate]); /* set LED */ if (nstate == IEEE80211_S_SCAN) { /* * Scanning: turn off beacon miss and don't beacon. * Mark beacon state so when we reach RUN state we'll * [re]setup beacons. Unblock the task q thread so * deferred interrupt processing is done. */ /* Ensure we stay awake during scan */ ATH_LOCK(sc); ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); ATH_UNLOCK(sc); ath_hal_intrset(ah, sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); sc->sc_beacons = 0; taskqueue_unblock(sc->sc_tq); } ni = ieee80211_ref_node(vap->iv_bss); rfilt = ath_calcrxfilter(sc); stamode = (vap->iv_opmode == IEEE80211_M_STA || vap->iv_opmode == IEEE80211_M_AHDEMO || vap->iv_opmode == IEEE80211_M_IBSS); /* * XXX Don't need to do this (and others) if we've transitioned * from SLEEP->RUN. */ if (stamode && nstate == IEEE80211_S_RUN) { sc->sc_curaid = ni->ni_associd; IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); } DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); ath_hal_setrxfilter(ah, rfilt); /* XXX is this to restore keycache on resume? */ if (vap->iv_opmode != IEEE80211_M_STA && (vap->iv_flags & IEEE80211_F_PRIVACY)) { for (i = 0; i < IEEE80211_WEP_NKID; i++) if (ath_hal_keyisvalid(ah, i)) ath_hal_keysetmac(ah, i, ni->ni_bssid); } /* * Invoke the parent method to do net80211 work. */ error = avp->av_newstate(vap, nstate, arg); if (error != 0) goto bad; /* * See above: ensure av_newstate() doesn't drop the lock * on us. */ IEEE80211_LOCK_ASSERT(ic); if (nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ieee80211_free_node(ni); ni = ieee80211_ref_node(vap->iv_bss); DPRINTF(sc, ATH_DEBUG_STATE, "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d\n", __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); switch (vap->iv_opmode) { #ifdef IEEE80211_SUPPORT_TDMA case IEEE80211_M_AHDEMO: if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) break; /* fall thru... */ #endif case IEEE80211_M_HOSTAP: case IEEE80211_M_IBSS: case IEEE80211_M_MBSS: /* * Allocate and setup the beacon frame. * * Stop any previous beacon DMA. This may be * necessary, for example, when an ibss merge * causes reconfiguration; there will be a state * transition from RUN->RUN that means we may * be called with beacon transmission active.
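* * Hence the unconditional stop of the beacon queue below, before * the new beacon frame is allocated.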
*/ ath_hal_stoptxdma(ah, sc->sc_bhalq); error = ath_beacon_alloc(sc, ni); if (error != 0) goto bad; /* * If joining an adhoc network defer beacon timer * configuration to the next beacon frame so we * have a current TSF to use. Otherwise we're * starting an ibss/bss so there's no need to delay; * if this is the first vap moving to RUN state, then * beacon state needs to be [re]configured. */ if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf != 0) { sc->sc_syncbeacon = 1; } else if (!sc->sc_beacons) { #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) ath_tdma_config(sc, vap); else #endif ath_beacon_config(sc, vap); sc->sc_beacons = 1; } break; case IEEE80211_M_STA: /* * Defer beacon timer configuration to the next * beacon frame so we have a current TSF to use * (any TSF collected when scanning is likely old). * However if it's due to a CSA -> RUN transition, * force a beacon update so we pick up a lack of * beacons from an AP in CAC and thus force a * scan. * * And, there's also corner cases here where * after a scan, the AP may have disappeared. * In that case, we may not receive an actual * beacon to update the beacon timer and thus we * won't get notified of the missing beacons. */ if (ostate != IEEE80211_S_RUN && ostate != IEEE80211_S_SLEEP) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: STA; syncbeacon=1\n", __func__); sc->sc_syncbeacon = 1; if (csa_run_transition) ath_beacon_config(sc, vap); /* * PR: kern/175227 * * Reconfigure beacons during reset; as otherwise * we won't get the beacon timers reprogrammed * after a reset and thus we won't pick up a * beacon miss interrupt. * * Hopefully we'll see a beacon before the BMISS * timer fires (too often), leading to a STA * disassociation. */ sc->sc_beacons = 1; } break; case IEEE80211_M_MONITOR: /* * Monitor mode vaps have only INIT->RUN and RUN->RUN * transitions so we must re-enable interrupts here to * handle the case of a single monitor mode vap. */ ath_hal_intrset(ah, sc->sc_imask); break; case IEEE80211_M_WDS: break; default: break; } /* * Let the hal process statistics collected during a * scan so it can provide calibrated noise floor data. */ ath_hal_process_noisefloor(ah); /* * Reset rssi stats; maybe not the best place... */ sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; /* * Force awake for RUN mode. */ ATH_LOCK(sc); ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); /* * Finally, start any timers and the task q thread * (in case we didn't go through SCAN state). 
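* (ath_longcalinterval is in seconds, per its ticks/hz use in * ath_calibrate() above; a value of 0 disables the periodic * calibration callout entirely.)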
if (ath_longcalinterval != 0) { /* start periodic recalibration timer */ callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); } else { DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", __func__); } ATH_UNLOCK(sc); taskqueue_unblock(sc->sc_tq); } else if (nstate == IEEE80211_S_INIT) { /* * If there are no vaps left in RUN state then * shutdown host/driver operation: * o disable interrupts * o disable the task queue thread * o mark beacon processing as stopped */ if (!ath_isanyrunningvaps(vap)) { sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); /* disable interrupts */ ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); taskqueue_block(sc->sc_tq); sc->sc_beacons = 0; } #ifdef IEEE80211_SUPPORT_TDMA ath_hal_setcca(ah, AH_TRUE); #endif } else if (nstate == IEEE80211_S_SLEEP) { /* We're going to sleep, so transition appropriately */ /* For now, only do this if we're a single STA vap */ if (sc->sc_nvaps == 1 && vap->iv_opmode == IEEE80211_M_STA) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon); ATH_LOCK(sc); /* * Always at least set the self-generated * frame config to set PWRMGT=1. */ ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP); /* * If we're not syncing beacons, transition * to NETWORK_SLEEP. * * We stay awake if syncbeacon > 0 in case * we need to listen for some beacons otherwise * our beacon timer config may be wrong. */ if (sc->sc_syncbeacon == 0) { ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP, 1); } ATH_UNLOCK(sc); } } bad: ieee80211_free_node(ni); /* * Restore the power state - either to what it was, or * to network_sleep if it's alright. */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return error; } /* * Allocate a key cache slot to the station so we can * setup a mapping from key index to node. The key cache * slot is needed for managing antenna state and for * compression when stations do not use crypto. We do * it unilaterally here; if crypto is employed this slot * will be reassigned. */ static void ath_setup_stationkey(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_softc; ieee80211_keyix keyix, rxkeyix; /* XXX should take a locked ref to vap->iv_bss */ if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { /* * Key cache is full; we'll fall back to doing * the more expensive lookup in software. Note * this also means no h/w compression. */ /* XXX msg+statistic */ } else { /* XXX locking? */ ni->ni_ucastkey.wk_keyix = keyix; ni->ni_ucastkey.wk_rxkeyix = rxkeyix; /* NB: must mark device key to get called back on delete */ ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); /* NB: this will create a pass-thru key entry */ ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); } } /* * Setup driver-specific state for a newly associated node. * Note that we're called also on a re-associate, the isnew * param tells us if this is the first time or not.
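* * On reassociation (isnew == 0) we also reset the TID state and, if * the node is in power save, wake its software queues so nothing * stays paused across the (re)association.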
*/ static void ath_newassoc(struct ieee80211_node *ni, int isnew) { struct ath_node *an = ATH_NODE(ni); struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_softc; const struct ieee80211_txparam *tp = ni->ni_txparms; an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n", __func__, ni->ni_macaddr, ":", isnew, an->an_is_powersave); ATH_NODE_LOCK(an); ath_rate_newassoc(sc, an, isnew); ATH_NODE_UNLOCK(an); if (isnew && (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) ath_setup_stationkey(ni); /* * If we're reassociating, make sure that any paused queues * get unpaused. * * Now, we may have frames in the hardware queue for this node. * So if we are reassociating and there are frames in the queue, * we need to go through the cleanup path to ensure that they're * marked as non-aggregate. */ if (! isnew) { DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; is_powersave=%d\n", __func__, ni->ni_macaddr, ":", an->an_is_powersave); /* XXX for now, we can't hold the lock across assoc */ ath_tx_node_reassoc(sc, an); /* XXX for now, we can't hold the lock across wakeup */ if (an->an_is_powersave) ath_tx_node_wakeup(sc, an); } } static int ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, int nchans, struct ieee80211_channel chans[]) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: rd %u cc %u location %c%s\n", __func__, reg->regdomain, reg->country, reg->location, reg->ecm ? " ecm" : ""); status = ath_hal_set_channels(ah, chans, nchans, reg->country, reg->regdomain); if (status != HAL_OK) { DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", __func__, status); return EINVAL; /* XXX */ } return 0; } static void ath_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", __func__, SKU_DEBUG, CTRY_DEFAULT); /* XXX check return */ (void) ath_hal_getchannels(ah, chans, maxchans, nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); } static int ath_getchannels(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; /* * Collect channel set based on EEPROM contents. */ status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); if (status != HAL_OK) { device_printf(sc->sc_dev, "%s: unable to collect channel list from hal, status %d\n", __func__, status); return EINVAL; } (void) ath_hal_getregdomain(ah, &sc->sc_eerd); ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ /* XXX map Atheros sku's to net80211 SKU's */ /* XXX net80211 types too small */ ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ ic->ic_regdomain.isocc[1] = ' '; ic->ic_regdomain.ecm = 1; ic->ic_regdomain.location = 'I'; DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", __func__, sc->sc_eerd, sc->sc_eecc, ic->ic_regdomain.regdomain, ic->ic_regdomain.country, ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); return 0; } static int ath_rate_setup(struct ath_softc *sc, u_int mode) { struct ath_hal *ah = sc->sc_ah; const HAL_RATE_TABLE *rt; switch (mode) { case IEEE80211_MODE_11A: rt = ath_hal_getratetable(ah, HAL_MODE_11A); break; case IEEE80211_MODE_HALF: rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); break; case IEEE80211_MODE_QUARTER: rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); break; case IEEE80211_MODE_11B: rt = ath_hal_getratetable(ah, HAL_MODE_11B); break; case IEEE80211_MODE_11G: rt = ath_hal_getratetable(ah, HAL_MODE_11G); break; case IEEE80211_MODE_TURBO_A: rt = ath_hal_getratetable(ah, HAL_MODE_108A); break; case IEEE80211_MODE_TURBO_G: rt = ath_hal_getratetable(ah, HAL_MODE_108G); break; case IEEE80211_MODE_STURBO_A: rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); break; case IEEE80211_MODE_11NA: rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); break; case IEEE80211_MODE_11NG: rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); break; default: DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", __func__, mode); return 0; } sc->sc_rates[mode] = rt; return (rt != NULL); } static void ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) { /* NB: on/off times from the Atheros NDIS driver, w/ permission */ static const struct { u_int rate; /* tx/rx 802.11 rate */ u_int16_t timeOn; /* LED on time (ms) */ u_int16_t timeOff; /* LED off time (ms) */ } blinkrates[] = { { 108, 40, 10 }, { 96, 44, 11 }, { 72, 50, 13 }, { 48, 57, 14 }, { 36, 67, 16 }, { 24, 80, 20 }, { 22, 100, 25 }, { 18, 133, 34 }, { 12, 160, 40 }, { 10, 200, 50 }, { 6, 240, 58 }, { 4, 267, 66 }, { 2, 400, 100 }, { 0, 500, 130 }, /* XXX half/quarter rates */ }; const HAL_RATE_TABLE *rt; int i, j; memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); rt = sc->sc_rates[mode]; KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); for (i = 0; i < rt->rateCount; i++) { uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; if (rt->info[i].phy != IEEE80211_T_HT) sc->sc_rixmap[ieeerate] = i; else sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; } memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); for (i = 0; i < nitems(sc->sc_hwmap); i++) { if (i >= rt->rateCount) { sc->sc_hwmap[i].ledon = (500 * hz) / 1000; sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; continue; } sc->sc_hwmap[i].ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; if (rt->info[i].phy == IEEE80211_T_HT) sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; if (rt->info[i].shortPreamble || rt->info[i].phy == IEEE80211_T_OFDM) sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; for (j = 0; j < nitems(blinkrates)-1; j++) if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) break; /* NB: this uses the last entry if the rate isn't found */ /* XXX beware of overlow */ sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; } sc->sc_currates = rt; sc->sc_curmode = mode; /* * All protection frames are transmitted at 2Mb/s for * 11g, otherwise at 1Mb/s. 
*/ if (mode == IEEE80211_MODE_11G) sc->sc_protrix = ath_tx_findrix(sc, 2*2); else sc->sc_protrix = ath_tx_findrix(sc, 2*1); /* NB: caller is responsible for resetting rate control state */ } static void ath_watchdog(void *arg) { struct ath_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; int do_reset = 0; ATH_LOCK_ASSERT(sc); if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { uint32_t hangs; ath_power_set_power_state(sc, HAL_PM_AWAKE); if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && hangs != 0) { device_printf(sc->sc_dev, "%s hang detected (0x%x)\n", hangs & 0xff ? "bb" : "mac", hangs); } else device_printf(sc->sc_dev, "device timeout\n"); do_reset = 1; counter_u64_add(ic->ic_oerrors, 1); sc->sc_stats.ast_watchdog++; ath_power_restore_power_state(sc); } /* * We can't hold the lock across the ath_reset() call. * * And since this routine can't hold a lock and sleep, * do the reset deferred. */ if (do_reset) { taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); } callout_schedule(&sc->sc_wd_ch, hz); } static void ath_parent(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; int error = EDOOFUS; ATH_LOCK(sc); if (ic->ic_nrunning > 0) { /* * To avoid rescanning another access point, * do not call ath_init() here. Instead, * only reflect promisc mode settings. */ if (sc->sc_running) { ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_mode_init(sc); ath_power_restore_power_state(sc); } else if (!sc->sc_invalid) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ error = ath_init(sc); } } else { ath_stop(sc); if (!sc->sc_invalid) ath_power_setpower(sc, HAL_PM_FULL_SLEEP, 1); } ATH_UNLOCK(sc); if (error == 0) { #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->start(sc->sc_tx99); else #endif ieee80211_start_all(ic); } } /* * Announce various information on device/driver attach. */ static void ath_announce(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; device_printf(sc->sc_dev, "%s mac %d.%d RF%s phy %d.%d\n", ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); device_printf(sc->sc_dev, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); if (bootverbose) { int i; for (i = 0; i <= WME_AC_VO; i++) { struct ath_txq *txq = sc->sc_ac2q[i]; device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n", txq->axq_qnum, ieee80211_wme_acnames[i]); } device_printf(sc->sc_dev, "Use hw queue %u for CAB traffic\n", sc->sc_cabq->axq_qnum); device_printf(sc->sc_dev, "Use hw queue %u for beacons\n", sc->sc_bhalq); } if (ath_rxbuf != ATH_RXBUF) device_printf(sc->sc_dev, "using %u rx buffers\n", ath_rxbuf); if (ath_txbuf != ATH_TXBUF) device_printf(sc->sc_dev, "using %u tx buffers\n", ath_txbuf); if (sc->sc_mcastkey && bootverbose) device_printf(sc->sc_dev, "using multicast key search\n"); } static void ath_dfs_tasklet(void *p, int npending) { struct ath_softc *sc = (struct ath_softc *) p; struct ieee80211com *ic = &sc->sc_ic; /* * If previous processing has found a radar event, * signal this to the net80211 layer to begin DFS * processing. 
*/ if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { /* DFS event found, initiate channel change */ /* * XXX doesn't currently tell us whether the event * XXX was found in the primary or extension * XXX channel! */ IEEE80211_LOCK(ic); ieee80211_dfs_notify_radar(ic, sc->sc_curchan); IEEE80211_UNLOCK(ic); } } /* * Enable/disable power save. This must be called with * no TX driver locks currently held, so it should only * be called from the RX path (which doesn't hold any * TX driver locks.) */ static void ath_node_powersave(struct ieee80211_node *ni, int enable) { #ifdef ATH_SW_PSQ struct ath_node *an = ATH_NODE(ni); struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_vap *avp = ATH_VAP(ni->ni_vap); /* XXX and no TXQ locks should be held here */ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n", __func__, ni->ni_macaddr, ":", !! enable); /* Suspend or resume software queue handling */ if (enable) ath_tx_node_sleep(sc, an); else ath_tx_node_wakeup(sc, an); /* Update net80211 state */ avp->av_node_ps(ni, enable); #else struct ath_vap *avp = ATH_VAP(ni->ni_vap); /* Update net80211 state */ avp->av_node_ps(ni, enable); #endif/* ATH_SW_PSQ */ } /* * Notification from net80211 that the powersave queue state has * changed. * * Since the software queue also may have some frames: * * + if the node software queue has frames and the TID state * is 0, we set the TIM; * + if the node and the stack are both empty, we clear the TIM bit. * + If the stack tries to set the bit, always set it. * + If the stack tries to clear the bit, only clear it if the * software queue in question is also cleared. * * TODO: this is called during node teardown; so let's ensure this * is all correctly handled and that the TIM bit is cleared. * It may be that the node flush is called _AFTER_ the net80211 * stack clears the TIM. * * Here is the racy part. Since it's possible >1 concurrent, * overlapping TXes will appear complete with a TX completion in * another thread, it's possible that the concurrent TIM calls will * clash. We can't hold the node lock here because setting the * TIM grabs the net80211 comlock and this may cause a LOR. * The solution is either to totally serialise _everything_ at * this point (ie, all TX, completion and any reset/flush go into * one taskqueue) or a new "ath TIM lock" needs to be created that * just wraps the driver state change and this call to avp->av_set_tim(). * * The same race exists in the net80211 power save queue handling * as well. Since multiple transmitting threads may queue frames * into the driver, as well as ps-poll and the driver transmitting * frames (and thus clearing the psq), it's quite possible that * a packet entering the PSQ and a ps-poll being handled will * race, causing the TIM to be cleared and not re-set. */ static int ath_node_set_tim(struct ieee80211_node *ni, int enable) { #ifdef ATH_SW_PSQ struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_node *an = ATH_NODE(ni); struct ath_vap *avp = ATH_VAP(ni->ni_vap); int changed = 0; ATH_TX_LOCK(sc); an->an_stack_psq = enable; /* * This will get called for all operating modes, * even if avp->av_set_tim is unset. * It's currently set for hostap/ibss modes; but * the same infrastructure is used for both STA * and AP/IBSS node power save. */ if (avp->av_set_tim == NULL) { ATH_TX_UNLOCK(sc); return (0); } /* * If setting the bit, always set it here. * If clearing the bit, only clear it if the * software queue is also empty. 
* * If the node has left power save, just clear the TIM * bit regardless of the state of the power save queue. * * XXX TODO: although atomics are used, it's quite possible * that a race will occur between this and setting/clearing * in another thread. TX completion will occur always in * one thread, however setting/clearing the TIM bit can come * from a variety of different process contexts! */ if (enable && an->an_tim_set == 1) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, tim_set=1, ignoring\n", __func__, ni->ni_macaddr, ":", enable); ATH_TX_UNLOCK(sc); } else if (enable) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, enabling TIM\n", __func__, ni->ni_macaddr, ":", enable); an->an_tim_set = 1; ATH_TX_UNLOCK(sc); changed = avp->av_set_tim(ni, enable); } else if (an->an_swq_depth == 0) { /* disable */ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n", __func__, ni->ni_macaddr, ":", enable); an->an_tim_set = 0; ATH_TX_UNLOCK(sc); changed = avp->av_set_tim(ni, enable); } else if (! an->an_is_powersave) { /* * disable regardless; the node isn't in powersave now */ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, an_pwrsave=0, disabling\n", __func__, ni->ni_macaddr, ":", enable); an->an_tim_set = 0; ATH_TX_UNLOCK(sc); changed = avp->av_set_tim(ni, enable); } else { /* * psq disable, node is currently in powersave, node * software queue isn't empty, so don't clear the TIM bit * for now. */ ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n", __func__, ni->ni_macaddr, ":", enable); changed = 0; } return (changed); #else struct ath_vap *avp = ATH_VAP(ni->ni_vap); /* * Some operating modes don't set av_set_tim(), so don't * update it here. */ if (avp->av_set_tim == NULL) return (0); return (avp->av_set_tim(ni, enable)); #endif /* ATH_SW_PSQ */ } /* * Set or update the TIM from the software queue. * * Check the software queue depth before attempting to lock * anything; that avoids needlessly taking the lock. Then, * re-check afterwards to ensure nothing has changed in the * meantime. * * set: This is designed to be called from the TX path, after * a frame has been queued; to see if the swq > 0. * * clear: This is designed to be called from the buffer completion point * (right now it's ath_tx_default_comp()) where the state of * a software queue has changed. * * It makes sense to place it at buffer free / completion rather * than after each software queue operation, as there's no real * point in churning the TIM bit as the last frames in the software * queue are transmitted. If they fail and we retry them, we'd * just be setting the TIM bit again anyway. */ void ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni, int enable) { #ifdef ATH_SW_PSQ struct ath_node *an; struct ath_vap *avp; /* Don't do this for broadcast/etc frames */ if (ni == NULL) return; an = ATH_NODE(ni); avp = ATH_VAP(ni->ni_vap); /* * And for operating modes without the TIM handler set, let's * just skip those. */ if (avp->av_set_tim == NULL) return; ATH_TX_LOCK_ASSERT(sc); if (enable) { if (an->an_is_powersave && an->an_tim_set == 0 && an->an_swq_depth != 0) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: swq_depth>0, tim_set=0, set!\n", __func__, ni->ni_macaddr, ":"); an->an_tim_set = 1; (void) avp->av_set_tim(ni, 1); } } else { /* * Don't bother grabbing the lock unless the queue is empty.
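 *
 * A sketch of the clear-side test used below: the TIM bit is only
 * cleared once every producer is quiet, i.e.
 *
 *	if (an->an_is_powersave &&
 *	    an->an_stack_psq == 0 &&
 *	    an->an_tim_set == 1 &&
 *	    an->an_swq_depth == 0)
 *		(void) avp->av_set_tim(ni, 0);
 *
 * where an_stack_psq mirrors net80211's view of the powersave queue
 * and an_swq_depth is the driver's own software queue depth.
 *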
*/ if (an->an_swq_depth != 0) return; if (an->an_is_powersave && an->an_stack_psq == 0 && an->an_tim_set == 1 && an->an_swq_depth == 0) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: swq_depth=0, tim_set=1, psq_set=0," " clear!\n", __func__, ni->ni_macaddr, ":"); an->an_tim_set = 0; (void) avp->av_set_tim(ni, 0); } } #else return; #endif /* ATH_SW_PSQ */ } /* * Received a ps-poll frame from net80211. * * Here we get a chance to serve out a software-queued frame ourselves * before we punt it to net80211 to transmit us one itself - either * because there's traffic in the net80211 psq, or a NULL frame to * indicate there's nothing else. */ static void ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m) { #ifdef ATH_SW_PSQ struct ath_node *an; struct ath_vap *avp; struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; int tid; /* Just paranoia */ if (ni == NULL) return; /* * Unassociated (temporary node) station. */ if (ni->ni_associd == 0) return; /* * We do have an active node, so let's begin looking into it. */ an = ATH_NODE(ni); avp = ATH_VAP(ni->ni_vap); /* * For now, we just call the original ps-poll method. * Once we're ready to flip this on: * * + Set leak to 1, as no matter what we're going to have * to send a frame; * + Check the software queue and if there's something in it, * schedule the highest TID that has traffic from this node. * Then make sure we schedule the software scheduler to * run so it picks up said frame. * * That way whatever happens, we'll at least send _a_ frame * to the given node. * * Again, yes, it's crappy QoS if the node has multiple * TIDs worth of traffic - but let's get it working first * before we optimise it. * * Also yes, there's definitely latency here - we're not * direct dispatching to the hardware in this path (and * we're likely being called from the packet receive path, * so going back into TX may be a little hairy!) but again * I'd like to get this working first before optimising * turn-around time. */ ATH_TX_LOCK(sc); /* * Legacy - we're called and the node isn't asleep. * Immediately punt. */ if (! an->an_is_powersave) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: not in powersave?\n", __func__, ni->ni_macaddr, ":"); ATH_TX_UNLOCK(sc); avp->av_recv_pspoll(ni, m); return; } /* * We're in powersave. * * Leak a frame. */ an->an_leak_count = 1; /* * Now, if there are no frames in the node, just punt to * recv_pspoll. * * Don't bother checking if the TIM bit is set, we really * only care if there are any frames here! */ if (an->an_swq_depth == 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: SWQ empty; punting to net80211\n", __func__, ni->ni_macaddr, ":"); avp->av_recv_pspoll(ni, m); return; } /* * Ok, let's schedule the highest TID that has traffic * and then schedule something. */ for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) { struct ath_tid *atid = &an->an_tid[tid]; /* * No frames? Skip. */ if (atid->axq_depth == 0) continue; ath_tx_tid_sched(sc, atid); /* * XXX we could do a direct call to the TXQ * scheduler code here to optimise latency * at the expense of a REALLY deep callstack. */ ATH_TX_UNLOCK(sc); taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: leaking frame to TID %d\n", __func__, ni->ni_macaddr, ":", tid); return; } ATH_TX_UNLOCK(sc); /* * XXX nothing in the TIDs at this point? Eek.
*/ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: TIDs empty, but ath_node showed traffic?!\n", __func__, ni->ni_macaddr, ":"); avp->av_recv_pspoll(ni, m); #else avp->av_recv_pspoll(ni, m); #endif /* ATH_SW_PSQ */ } MODULE_VERSION(if_ath, 1); MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ) MODULE_DEPEND(if_ath, alq, 1, 1, 1); #endif Index: projects/clang400-import/sys/dev/ath/if_ath_tx.c =================================================================== --- projects/clang400-import/sys/dev/ath/if_ath_tx.c (revision 312719) +++ projects/clang400-import/sys/dev/ath/if_ath_tx.c (revision 312720) @@ -1,6258 +1,6293 @@ /*- * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. 
*/ #include "opt_inet.h" #include "opt_ath.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #ifdef ATH_TX99_DIAG #include #endif #include #include #include #ifdef ATH_DEBUG_ALQ #include #endif /* * How many retries to perform in software */ #define SWMAX_RETRIES 10 /* * What queue to throw the non-QoS TID traffic into */ #define ATH_NONQOS_TID_AC WME_AC_VO #if 0 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an); #endif static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid); static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid); static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0); static int ath_tx_action_frame_override_queue(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int *tid); static struct ath_buf * ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf); #ifdef ATH_DEBUG_ALQ void ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf; int i, n; const char *ds; /* XXX we should skip out early if debugging isn't enabled! */ bf = bf_first; while (bf != NULL) { /* XXX should ensure bf_nseg > 0! */ if (bf->bf_nseg == 0) break; n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1; for (i = 0, ds = (const char *) bf->bf_desc; i < n; i++, ds += sc->sc_tx_desclen) { if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC, sc->sc_tx_desclen, ds); } bf = bf->bf_next; } } #endif /* ATH_DEBUG_ALQ */ /* * Whether to use the 11n rate scenario functions or not */ static inline int ath_tx_is_11n(struct ath_softc *sc) { return ((sc->sc_ah->ah_magic == 0x20065416) || (sc->sc_ah->ah_magic == 0x19741014)); } /* * Obtain the current TID from the given frame. * * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.) * This has implications for which AC/priority the packet is placed * in. */ static int ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0) { const struct ieee80211_frame *wh; int pri = M_WME_GETAC(m0); wh = mtod(m0, const struct ieee80211_frame *); if (! IEEE80211_QOS_HAS_SEQ(wh)) return IEEE80211_NONQOS_TID; else return WME_AC_TO_TID(pri); } static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* Only update/resync if needed */ if (bf->bf_state.bfs_isretried == 0) { wh->i_fc[1] |= IEEE80211_FC1_RETRY; bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); } bf->bf_state.bfs_isretried = 1; bf->bf_state.bfs_retries ++; } /* * Determine what the correct AC queue for the given frame * should be. * * This code assumes that the TIDs map consistently to * the underlying hardware (or software) ath_txq. * Since the sender may try to set an AC which is * arbitrary, non-QoS TIDs may end up being put on * completely different ACs. There's no way to put a * TID into multiple ath_txq's for scheduling, so * for now we override the AC/TXQ selection and set * non-QOS TID frames into the BE queue. 
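 *
 * The classifier itself is tiny; a sketch of the fallback below
 * (note that ATH_NONQOS_TID_AC is defined above as WME_AC_VO, so
 * despite the wording here the non-QoS traffic currently lands on
 * the VO queue rather than BE):
 *
 *	if (IEEE80211_QOS_HAS_SEQ(wh))
 *		return pri;
 *	return ATH_NONQOS_TID_AC;
 *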
* * This may be completely incorrect - specifically, * some management frames may end up out of order * compared to the QoS traffic they're controlling. * I'll look into this later. */ static int ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0) { const struct ieee80211_frame *wh; int pri = M_WME_GETAC(m0); wh = mtod(m0, const struct ieee80211_frame *); if (IEEE80211_QOS_HAS_SEQ(wh)) return pri; return ATH_NONQOS_TID_AC; } void ath_txfrag_cleanup(struct ath_softc *sc, ath_bufhead *frags, struct ieee80211_node *ni) { struct ath_buf *bf, *next; ATH_TXBUF_LOCK_ASSERT(sc); TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { /* NB: bf assumed clean */ TAILQ_REMOVE(frags, bf, bf_list); ath_returnbuf_head(sc, bf); ieee80211_node_decref(ni); } } /* * Setup xmit of a fragmented frame. Allocate a buffer * for each frag and bump the node reference count to * reflect the held reference to be setup by ath_tx_start. */ int ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, struct mbuf *m0, struct ieee80211_node *ni) { struct mbuf *m; struct ath_buf *bf; ATH_TXBUF_LOCK(sc); for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { /* XXX non-management? */ bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); if (bf == NULL) { /* out of buffers, cleanup */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n", __func__); ath_txfrag_cleanup(sc, frags, ni); break; } ieee80211_node_incref(ni); TAILQ_INSERT_TAIL(frags, bf, bf_list); } ATH_TXBUF_UNLOCK(sc); return !TAILQ_EMPTY(frags); } static int ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = ATH_MAX_SCATTER + 1; } else if (error != 0) { sc->sc_stats.ast_tx_busdma++; ieee80211_free_mbuf(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster. */ if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ sc->sc_stats.ast_tx_linear++; m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER); if (m == NULL) { ieee80211_free_mbuf(m0); sc->sc_stats.ast_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.ast_tx_busdma++; ieee80211_free_mbuf(m0); return error; } KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.ast_tx_nodata++; ieee80211_free_mbuf(m0); return EIO; } DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } /* * Chain together segments+descriptors for a frame - 11n or otherwise. * * For aggregates, this is called on each frame in the aggregate. */ static void ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0, struct ath_buf *bf, int is_aggr, int is_first_subframe, int is_last_subframe) { struct ath_hal *ah = sc->sc_ah; char *ds; int i, bp, dsp; HAL_DMA_ADDR bufAddrList[4]; uint32_t segLenList[4]; int numTxMaps = 1; int isFirstDesc = 1; /* * XXX There's txdma and txdma_mgmt; the descriptor * sizes must match. 
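 *
 * A worked example of the packing loop that follows: with
 * numTxMaps == 4 (EDMA chips) a frame with bf_nseg == 6 needs
 * ceil(6 / 4) == 2 descriptors, while with numTxMaps == 1 (legacy
 * chips) the same frame needs 6.  That is the same calculation used
 * by ath_tx_alq_post() above:
 *
 *	ndesc = ((bf->bf_nseg - 1) / numTxMaps) + 1;
 *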
*/ struct ath_descdma *dd = &sc->sc_txdma; /* * Fill in the remainder of the descriptor info. */ /* * We need the number of TX data pointers in each descriptor. * EDMA and later chips support 4 TX buffers per descriptor; * previous chips just support one. */ numTxMaps = sc->sc_tx_nmaps; /* * For EDMA and later chips ensure the TX map is fully populated * before advancing to the next descriptor. */ ds = (char *) bf->bf_desc; bp = dsp = 0; bzero(bufAddrList, sizeof(bufAddrList)); bzero(segLenList, sizeof(segLenList)); for (i = 0; i < bf->bf_nseg; i++) { bufAddrList[bp] = bf->bf_segs[i].ds_addr; segLenList[bp] = bf->bf_segs[i].ds_len; bp++; /* * Go to the next segment if this isn't the last segment * and there's space in the current TX map. */ if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) continue; /* * Last segment or we're out of buffer pointers. */ bp = 0; if (i == bf->bf_nseg - 1) ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0); else ath_hal_settxdesclink(ah, (struct ath_desc *) ds, bf->bf_daddr + dd->dd_descsize * (dsp + 1)); /* * XXX This assumes that bfs_txq is the actual destination * hardware queue at this point. It may not have been * assigned; it may actually be pointing to the multicast * software TXQ id. These must be fixed! */ ath_hal_filltxdesc(ah, (struct ath_desc *) ds , bufAddrList , segLenList , bf->bf_descid /* XXX desc id */ , bf->bf_state.bfs_tx_queue , isFirstDesc /* first segment */ , i == bf->bf_nseg - 1 /* last segment */ , (struct ath_desc *) ds0 /* first descriptor */ ); /* * Make sure the 11n aggregate fields are cleared. * * XXX TODO: this doesn't need to be called for * aggregate frames, as it'll be called on all * sub-frames. Since the descriptors are in * non-cacheable memory, this leads to some * rather slow writes on MIPS/ARM platforms. */ if (ath_tx_is_11n(sc)) ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); /* * If 11n is enabled, set it up as if it's an aggregate * frame. */ if (is_last_subframe) { ath_hal_set11n_aggr_last(sc->sc_ah, (struct ath_desc *) ds); } else if (is_aggr) { /* * This clears the aggrlen field, so * the caller needs to call set_aggr_first()! * * XXX TODO: don't call this for the first * descriptor in the first frame in an * aggregate! */ ath_hal_set11n_aggr_middle(sc->sc_ah, (struct ath_desc *) ds, bf->bf_state.bfs_ndelim); } isFirstDesc = 0; bf->bf_lastds = (struct ath_desc *) ds; /* * Don't forget to skip to the next descriptor. */ ds += sc->sc_tx_desclen; dsp++; /* * .. and don't forget to blank these out! */ bzero(bufAddrList, sizeof(bufAddrList)); bzero(segLenList, sizeof(segLenList)); } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); } /* * Set the rate control fields in the given descriptor based on * the bf_state fields and node state. * * The bfs fields should already be set with the relevant rate * control information, including whether MRR is to be enabled. * * Since the FreeBSD HAL currently sets up the first TX rate * in ath_hal_setuptxdesc(), this will setup the MRR * conditionally for the pre-11n chips, and call ath_buf_set_rate * unconditionally for 11n chips. These require the 11n rate * scenario to be set if MCS rates are enabled, so it's easier * to just always call it. The caller can then only set rates 2, 3 * and 4 if multi-rate retry is needed. */ static void ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf) { struct ath_rc_series *rc = bf->bf_state.bfs_rc; /* If mrr is disabled, blank tries 1, 2, 3 */ if (!
bf->bf_state.bfs_ismrr) rc[1].tries = rc[2].tries = rc[3].tries = 0; #if 0 /* * If NOACK is set, just set ntries=1. */ else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { rc[1].tries = rc[2].tries = rc[3].tries = 0; rc[0].tries = 1; } #endif /* * Always call - that way a retried descriptor will * have the MRR fields overwritten. * * XXX TODO: see if this is really needed - setting up * the first descriptor should set the MRR fields to 0 * for us anyway. */ if (ath_tx_is_11n(sc)) { ath_buf_set_rate(sc, ni, bf); } else { ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc , rc[1].ratecode, rc[1].tries , rc[2].ratecode, rc[2].tries , rc[3].ratecode, rc[3].tries ); } } /* * Setup segments+descriptors for an 11n aggregate. * bf_first is the first buffer in the aggregate. * The descriptor list must already have been linked together using * bf->bf_next. */ static void ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf, *bf_prev = NULL; struct ath_desc *ds0 = bf_first->bf_desc; DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n", __func__, bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_al); bf = bf_first; if (bf->bf_state.bfs_txrate0 == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); if (bf->bf_state.bfs_rc[0].ratecode == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n", __func__, bf, 0); /* * Setup all descriptors of all subframes - this will * call ath_hal_set11n_aggr_middle() on every frame. */ while (bf != NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n", __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, SEQNO(bf->bf_state.bfs_seqno)); /* * Setup the initial fields for the first descriptor - all * the non-11n specific stuff. */ ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc , bf->bf_state.bfs_pktlen /* packet length */ , bf->bf_state.bfs_hdrlen /* header length */ , bf->bf_state.bfs_atype /* Atheros packet type */ , bf->bf_state.bfs_txpower /* txpower */ , bf->bf_state.bfs_txrate0 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ , bf->bf_state.bfs_keyix /* key cache index */ , bf->bf_state.bfs_txantenna /* antenna mode */ , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ , bf->bf_state.bfs_ctsrate /* rts/cts rate */ , bf->bf_state.bfs_ctsduration /* rts/cts duration */ ); /* * First descriptor? Setup the rate control and initial * aggregate header information. */ if (bf == bf_first) { /* * setup first desc with rate and aggr info */ ath_tx_set_ratectrl(sc, bf->bf_node, bf); } /* * Setup the descriptors for a multi-descriptor frame. * This is both aggregate and non-aggregate aware. */ ath_tx_chaindesclist(sc, ds0, bf, 1, /* is_aggr */ !! (bf == bf_first), /* is_first_subframe */ !! (bf->bf_next == NULL) /* is_last_subframe */ ); if (bf == bf_first) { /* * Initialise the first 11n aggregate with the * aggregate length and aggregate enable bits. */ ath_hal_set11n_aggr_first(sc->sc_ah, ds0, bf->bf_state.bfs_al, bf->bf_state.bfs_ndelim); } /* * Link the last descriptor of the previous frame * to the beginning descriptor of this frame. */ if (bf_prev != NULL) ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, bf->bf_daddr); /* Save a copy so we can link the next descriptor in */ bf_prev = bf; bf = bf->bf_next; } /* * Set the first descriptor bf_lastds field to point to * the last descriptor in the last subframe, that's where * the status update will occur.
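 *
 * The resulting layout for a three-subframe aggregate, as a sketch
 * (ath_bufs linked via bf_next, descriptors via settxdesclink):
 *
 *	bf_first --> bf2 --> bf3 --> NULL
 *	  ds0 ...  -->  ds ...  -->  ds ... (lastds)
 *
 * with bf_first->bf_lastds pointing at bf3's final descriptor, since
 * that is where the hardware writes the completion status.
 *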
*/ bf_first->bf_lastds = bf_prev->bf_lastds; /* * And bf_last in the first descriptor points to the end of * the aggregate list. */ bf_first->bf_last = bf_prev; /* * For non-AR9300 NICs, which require the rate control * in the final descriptor - let's set that up now. * * This is because the filltxdesc() HAL call doesn't * populate the last segment with rate control information * if firstSeg is also true. For non-aggregate frames * that is fine, as the first frame already has rate control * info. But if the last frame in an aggregate has one * descriptor, both firstseg and lastseg will be true and * the rate info isn't copied. * * This is inefficient on MIPS/ARM platforms that have * non-cacheable memory for TX descriptors, but we'll just * make do for now. * * As to why the rate table is stashed in the last descriptor * rather than the first descriptor? Because proctxdesc() * is called on the final descriptor in an MPDU or A-MPDU - * ie, the one that gets updated by the hardware upon * completion. That way proctxdesc() doesn't need to know * about the first _and_ last TX descriptor. */ ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__); } /* * Hand-off a frame to the multicast TX queue. * * This is a software TXQ which will be appended to the CAB queue * during the beacon setup code. * * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated * with the actual hardware txq, or all of this will fall apart. * * XXX It may not be a bad idea to just stuff the QCU ID into bf_state * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated * correctly. */ static void ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); /* * Ensure that the tx queue is the cabq, so things get * mapped correctly. */ if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", __func__, bf, bf->bf_state.bfs_tx_queue, txq->axq_qnum); } ATH_TXQ_LOCK(txq); if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); struct ieee80211_frame *wh; /* mark previous frame */ wh = mtod(bf_last->bf_m, struct ieee80211_frame *); wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, BUS_DMASYNC_PREWRITE); /* link descriptor */ ath_hal_settxdesclink(sc->sc_ah, bf_last->bf_lastds, bf->bf_daddr); } ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ATH_TXQ_UNLOCK(txq); } /* * Hand-off packet to a hardware queue. */ static void ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf_first; /* * Insert the frame on the outbound list and pass it on * to the hardware. Multicast frames buffered for power * save stations and transmitted from the CAB queue are stored * on a s/w only queue and loaded on to the CAB queue in * the SWBA handler since frames only go out on DTIM and * to avoid possible races. */ ATH_TX_LOCK_ASSERT(sc); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, ("ath_tx_handoff_hw called for mcast queue")); /* * XXX We should instead just verify that sc_txstart_cnt * or ath_txproc_cnt > 0.
That would mean that * the reset is going to be waiting for us to complete. */ if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { device_printf(sc->sc_dev, "%s: TX dispatch without holding txcount/txstart refcnt!\n", __func__); } /* * XXX .. this is going to cause the hardware to get upset; * so we really should find some way to drop or queue * things. */ ATH_TXQ_LOCK(txq); /* * XXX TODO: if there's a holdingbf, then * ATH_TXQ_PUTRUNNING should be clear. * * If there is a holdingbf and the list is empty, * then axq_link should be pointing to the holdingbf. * * Otherwise it should point to the last descriptor * in the last ath_buf. * * In any case, we should really ensure that we * update the previous descriptor link pointer to * this descriptor, regardless of all of the above state. * * For now this is captured by having axq_link point * to either the holdingbf (if the TXQ list is empty) * or the end of the list (if the TXQ list isn't empty.) * I'd rather just kill axq_link here and do it as above. */ /* * Append the frame to the TX queue. */ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ATH_KTR(sc, ATH_KTR_TX, 3, "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " "depth=%d", txq->axq_qnum, bf, txq->axq_depth); /* * If there's a link pointer, update it. * * XXX we should replace this with the above logic, just * to kill axq_link with fire. */ if (txq->axq_link != NULL) { *txq->axq_link = bf->bf_daddr; DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); ATH_KTR(sc, ATH_KTR_TX, 5, "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " "lastds=%d", txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, bf->bf_lastds); } /* * If we've not pushed anything into the hardware yet, * push the head of the queue into the TxDP. * * Once we've started DMA, there's no guarantee that * updating the TxDP with a new value will actually work. * So we just don't do that - if we hit the end of the list, * we keep that buffer around (the "holding buffer") and * re-start DMA by updating the link pointer of _that_ * descriptor and then restart DMA. */ if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { bf_first = TAILQ_FIRST(&txq->axq_q); txq->axq_flags |= ATH_TXQ_PUTRUNNING; ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, txq->axq_qnum, (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, txq->axq_depth); ATH_KTR(sc, ATH_KTR_TX, 5, "ath_tx_handoff: TXDP[%u] = %p (%p) " "lastds=%p depth %d", txq->axq_qnum, (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, bf_first->bf_lastds, txq->axq_depth); } /* * Ensure that the bf TXQ matches this TXQ, so later * checking and holding buffer manipulation is sane. */ if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n", __func__, bf, bf->bf_state.bfs_tx_queue, txq->axq_qnum); } /* * Track aggregate queue depth. */ if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth++; /* * Update the link pointer. */ ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); /* * Start DMA. * * If we wrote a TxDP above, DMA will start from here. * * If DMA is running, it'll do nothing. * * If the DMA engine hit the end of the QCU list (ie LINK=NULL, * or VEOL) then it stops at the last transmitted write. 
* We then append a new frame by updating the link pointer * in that descriptor and then kick TxE here; it will re-read * that last descriptor and find the new descriptor to transmit. * * This is why we keep the holding descriptor around. */ ath_hal_txstart(ah, txq->axq_qnum); ATH_TXQ_UNLOCK(txq); ATH_KTR(sc, ATH_KTR_TX, 1, "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); } /* * Restart TX DMA for the given TXQ. * * This must be called whether the queue is empty or not. */ static void ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) { struct ath_buf *bf, *bf_last; ATH_TXQ_LOCK_ASSERT(txq); /* XXX make this ATH_TXQ_FIRST */ bf = TAILQ_FIRST(&txq->axq_q); bf_last = ATH_TXQ_LAST(txq, axq_q_s); if (bf == NULL) return; DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n", __func__, txq->axq_qnum, bf, bf_last, (uint32_t) bf->bf_daddr); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) ath_tx_dump(sc, txq); #endif /* * This is called from a restart, so DMA is known to be * completely stopped. */ KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), ("%s: Q%d: called with PUTRUNNING=1\n", __func__, txq->axq_qnum)); ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); txq->axq_flags |= ATH_TXQ_PUTRUNNING; ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, &txq->axq_link); ath_hal_txstart(sc->sc_ah, txq->axq_qnum); } /* * Hand off a packet to the hardware (or mcast queue.) * * The relevant hardware txq should be locked. */ static void ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) ath_tx_alq_post(sc, bf); #endif if (txq->axq_qnum == ATH_TXQ_SWQ) ath_tx_handoff_mcast(sc, txq, bf); else ath_tx_handoff_hw(sc, txq, bf); } static int ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen, int *keyix) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n", __func__, *hdrlen, *pktlen, isfrag, iswep, m0); if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ return (0); } /* * Adjust the packet + header lengths for the crypto * additions and calculate the h/w key index. When * a s/w mic is done the frame will have had any mic * added to it prior to entry so m0->m_pkthdr.len will * account for it. Otherwise we need to add it to the * packet length. */ cip = k->wk_cipher; (*hdrlen) += cip->ic_header; (*pktlen) += cip->ic_header + cip->ic_trailer; /* NB: frags always have any TKIP MIC done in s/w */ if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) (*pktlen) += cip->ic_miclen; (*keyix) = k->wk_keyix; } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { /* * Use station key cache slot, if assigned. */ (*keyix) = ni->ni_ucastkey.wk_keyix; if ((*keyix) == IEEE80211_KEYIX_NONE) (*keyix) = HAL_TXKEYIX_INVALID; } else (*keyix) = HAL_TXKEYIX_INVALID; return (1); } /* * Calculate whether interoperability protection is required for * this frame. 
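 *
 * (As a concrete example of the length fixups done above in
 * ath_tx_tag_crypto(): assuming net80211's TKIP cipher descriptor,
 * which advertises an 8 byte header, 4 byte trailer and 8 byte
 * Michael MIC, hdrlen grows by ic_header == 8 and pktlen by
 * ic_header + ic_trailer == 12, plus ic_miclen == 8 when the MIC is
 * done in hardware and the frame is not a fragment.)
 *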
* * This requires the rate control information be filled in, * as the protection requirement depends upon the current * operating mode / PHY. */ static void ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; uint8_t rix; uint16_t flags; int shortPreamble; const HAL_RATE_TABLE *rt = sc->sc_currates; struct ieee80211com *ic = &sc->sc_ic; flags = bf->bf_state.bfs_txflags; rix = bf->bf_state.bfs_rc[0].rix; shortPreamble = bf->bf_state.bfs_shpream; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* Disable frame protection for TOA probe frames */ if (bf->bf_flags & ATH_BUF_TOA_PROBE) { /* XXX count */ flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA); bf->bf_state.bfs_doprot = 0; goto finish; } /* * If 802.11g protection is enabled, determine whether * to use RTS/CTS or just CTS. Note that this is only * done for OFDM unicast frames. */ if ((ic->ic_flags & IEEE80211_F_USEPROT) && rt->info[rix].phy == IEEE80211_T_OFDM && (flags & HAL_TXDESC_NOACK) == 0) { bf->bf_state.bfs_doprot = 1; /* XXX fragments must use CCK rates w/ protection */ if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { flags |= HAL_TXDESC_RTSENA; } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { flags |= HAL_TXDESC_CTSENA; } /* * For frags it would be desirable to use the * highest CCK rate for RTS/CTS. But stations * farther away may detect it at a lower CCK rate * so use the configured protection rate instead * (for now). */ sc->sc_stats.ast_tx_protect++; } /* * If 11n protection is enabled and it's a HT frame, * enable RTS. * * XXX ic_htprotmode or ic_curhtprotmode? * XXX should ic_htprotmode only matter if ic_curhtprotmode * XXX indicates it's not a HT pure environment? */ if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && rt->info[rix].phy == IEEE80211_T_HT && (flags & HAL_TXDESC_NOACK) == 0) { flags |= HAL_TXDESC_RTSENA; sc->sc_stats.ast_tx_htprotect++; } finish: bf->bf_state.bfs_txflags = flags; } /* * Update the frame duration given the currently selected rate. * * This also updates the frame duration value, so it will require * a DMA flush. */ static void ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_frame *wh; uint8_t rix; uint16_t flags; int shortPreamble; struct ath_hal *ah = sc->sc_ah; const HAL_RATE_TABLE *rt = sc->sc_currates; int isfrag = bf->bf_m->m_flags & M_FRAG; flags = bf->bf_state.bfs_txflags; rix = bf->bf_state.bfs_rc[0].rix; shortPreamble = bf->bf_state.bfs_shpream; wh = mtod(bf->bf_m, struct ieee80211_frame *); /* * Calculate duration. This logically belongs in the 802.11 * layer but it lacks sufficient information to calculate it. */ if ((flags & HAL_TXDESC_NOACK) == 0 && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { u_int16_t dur; if (shortPreamble) dur = rt->info[rix].spAckDuration; else dur = rt->info[rix].lpAckDuration; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { dur += dur; /* additional SIFS+ACK */ /* * Include the size of next fragment so NAV is * updated properly. The last fragment uses only * the ACK duration. * * XXX TODO: ensure that the rate lookup for each * fragment is the same as the rate used by the * first fragment! */ dur += ath_hal_computetxtime(ah, rt, bf->bf_nextfraglen, rix, shortPreamble, AH_TRUE); } if (isfrag) { /* * Force hardware to use computed duration for next * fragment by disabling multi-rate retry which updates * duration based on the multi-rate duration table. */ bf->bf_state.bfs_ismrr = 0; bf->bf_state.bfs_try0 = ATH_TXMGTTRY; /* XXX update bfs_rc[0].try?
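 *
 * For reference, the NAV accounting computed above for a fragment
 * burst is, in sketch form:
 *
 *	dur  = ACK duration;		this fragment's response
 *	dur += dur;			+ SIFS + ACK for the next one
 *	dur += txtime(bf_nextfraglen);	airtime of the next fragment
 *
 * so only the final fragment advertises a bare ACK duration.
 *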
*/ } /* Update the duration field itself */ *(u_int16_t *)wh->i_dur = htole16(dur); } } static uint8_t ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt, int cix, int shortPreamble) { uint8_t ctsrate; /* * CTS transmit rate is derived from the transmit rate * by looking in the h/w rate table. We must also factor * in whether or not a short preamble is to be used. */ /* NB: cix is set above where RTS/CTS is enabled */ KASSERT(cix != 0xff, ("cix not setup")); ctsrate = rt->info[cix].rateCode; /* XXX this should only matter for legacy rates */ if (shortPreamble) ctsrate |= rt->info[cix].shortPreamble; return (ctsrate); } /* * Calculate the RTS/CTS duration for legacy frames. */ static int ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix, int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt, int flags) { int ctsduration = 0; /* This mustn't be called for HT modes */ if (rt->info[cix].phy == IEEE80211_T_HT) { printf("%s: HT rate where it shouldn't be (0x%x)\n", __func__, rt->info[cix].rateCode); return (-1); } /* * Compute the transmit duration based on the frame * size and the size of an ACK frame. We call into the * HAL to do the computation since it depends on the * characteristics of the actual PHY being used. * * NB: CTS is assumed the same size as an ACK so we can * use the precalculated ACK durations. */ if (shortPreamble) { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].spAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_TRUE, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].spAckDuration; } else { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].lpAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_FALSE, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].lpAckDuration; } return (ctsduration); } /* * Update the given ath_buf with updated rts/cts setup and duration * values. * * To support rate lookups for each software retry, the rts/cts rate * and cts duration must be re-calculated. * * This function assumes the RTS/CTS flags have been set as needed; * mrr has been disabled; and the rate control lookup has been done. * * XXX TODO: MRR need only be disabled for the pre-11n NICs. * XXX The 11n NICs support per-rate RTS/CTS configuration. */ static void ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf) { uint16_t ctsduration = 0; uint8_t ctsrate = 0; uint8_t rix = bf->bf_state.bfs_rc[0].rix; uint8_t cix = 0; const HAL_RATE_TABLE *rt = sc->sc_currates; /* * No RTS/CTS enabled? Don't bother. */ if ((bf->bf_state.bfs_txflags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) { /* XXX is this really needed? */ bf->bf_state.bfs_ctsrate = 0; bf->bf_state.bfs_ctsduration = 0; return; } /* * If protection is enabled, use the protection rix control * rate. Otherwise use the rate0 control rate. */ if (bf->bf_state.bfs_doprot) rix = sc->sc_protrix; else rix = bf->bf_state.bfs_rc[0].rix; /* * If the raw path has hard-coded ctsrate0 to something, * use it. */ if (bf->bf_state.bfs_ctsrate0 != 0) cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); else /* Control rate from above */ cix = rt->info[rix].controlRate; /* Calculate the rtscts rate for the given cix */ ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, bf->bf_state.bfs_shpream); /* The 11n chipsets do ctsduration calculations for you */ if (! 
ath_tx_is_11n(sc)) ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, rt, bf->bf_state.bfs_txflags); /* Squirrel away in ath_buf */ bf->bf_state.bfs_ctsrate = ctsrate; bf->bf_state.bfs_ctsduration = ctsduration; /* * Must disable multi-rate retry when using RTS/CTS. */ if (!sc->sc_mrrprot) { bf->bf_state.bfs_ismrr = 0; bf->bf_state.bfs_try0 = bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ } } /* * Setup the descriptor chain for a normal or fast-frame * frame. * * XXX TODO: extend to include the destination hardware QCU ID. * Make sure that is correct. Make sure that when being added * to the mcastq, the CABQ QCUID is set or things will get a bit * odd. */ static void ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf) { struct ath_desc *ds = bf->bf_desc; struct ath_hal *ah = sc->sc_ah; if (bf->bf_state.bfs_txrate0 == 0) DPRINTF(sc, ATH_DEBUG_XMIT, "%s: bf=%p, txrate0=%d\n", __func__, bf, 0); ath_hal_setuptxdesc(ah, ds , bf->bf_state.bfs_pktlen /* packet length */ , bf->bf_state.bfs_hdrlen /* header length */ , bf->bf_state.bfs_atype /* Atheros packet type */ , bf->bf_state.bfs_txpower /* txpower */ , bf->bf_state.bfs_txrate0 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ , bf->bf_state.bfs_keyix /* key cache index */ , bf->bf_state.bfs_txantenna /* antenna mode */ , bf->bf_state.bfs_txflags /* flags */ , bf->bf_state.bfs_ctsrate /* rts/cts rate */ , bf->bf_state.bfs_ctsduration /* rts/cts duration */ ); /* * This will be overridden when the descriptor chain is written. */ bf->bf_lastds = ds; bf->bf_last = bf; /* Set rate control and descriptor chain for this frame */ ath_tx_set_ratectrl(sc, bf->bf_node, bf); ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0); } /* * Do a rate lookup. * * This performs a rate lookup for the given ath_buf only if it's required. * Non-data frames and raw frames don't require it. * * This populates the primary and MRR entries; MRR values are * then disabled later on if something requires it (eg RTS/CTS on * pre-11n chipsets.) * * This needs to be done before the RTS/CTS fields are calculated * as they may depend upon the rate chosen. */ static void ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf) { uint8_t rate, rix; int try0; if (! bf->bf_state.bfs_doratelookup) return; /* Get rid of any previous state */ bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, &rix, &try0, &rate); /* In case MRR is disabled, make sure rc[0] is setup correctly */ bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].ratecode = rate; bf->bf_state.bfs_rc[0].tries = try0; if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, bf->bf_state.bfs_rc); ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); sc->sc_txrix = rix; /* for LED blinking */ sc->sc_lastdatarix = rix; /* for fast frames */ bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_txrate0 = rate; } /* * Update the CLRDMASK bit in the ath_buf if it needs to be set. */ static void ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(bf->bf_node); ATH_TX_LOCK_ASSERT(sc); if (an->clrdmask == 1) { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; an->clrdmask = 0; } } /* * Return whether this frame should be software queued or * direct dispatched.
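 *
 * (For the lookup in ath_tx_do_ratelookup() above, the bfs_rc[]
 * array carries up to four try series; in sketch form it is left
 * holding:
 *
 *	bfs_rc[0] = { rix, ratecode, try0 };	from ath_rate_findrate()
 *	bfs_rc[1..3]				from ath_rate_getxtxrates(),
 *						only if bfs_ismrr
 *
 * and rc[1..3].tries are later zeroed by ath_tx_set_ratectrl() when
 * MRR has been disabled, e.g. for RTS/CTS on pre-11n parts.)
 *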
* * When doing powersave, BAR frames should be queued but other management * frames should be directly sent. * * When not doing powersave, stick BAR frames into the hardware queue * so it goes out even though the queue is paused. * * For now, management frames are also software queued by default. */ static int ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an, struct mbuf *m0, int *queue_to_head) { struct ieee80211_node *ni = &an->an_node; struct ieee80211_frame *wh; uint8_t type, subtype; wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; (*queue_to_head) = 0; /* If it's not in powersave - direct-dispatch BAR */ if ((ATH_NODE(ni)->an_is_powersave == 0) && type == IEEE80211_FC0_TYPE_CTL && subtype == IEEE80211_FC0_SUBTYPE_BAR) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: BAR: TX'ing direct\n", __func__); return (0); } else if ((ATH_NODE(ni)->an_is_powersave == 1) && type == IEEE80211_FC0_TYPE_CTL && subtype == IEEE80211_FC0_SUBTYPE_BAR) { /* BAR TX whilst asleep; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq: TX'ing\n", __func__); (*queue_to_head) = 1; return (1); } else if ((ATH_NODE(ni)->an_is_powersave == 1) && (type == IEEE80211_FC0_TYPE_MGT || type == IEEE80211_FC0_TYPE_CTL)) { /* * Other control/mgmt frame; bypass software queuing * for now! */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %6D: Node is asleep; sending mgmt " "(type=%d, subtype=%d)\n", __func__, ni->ni_macaddr, ":", type, subtype); return (0); } else { return (1); } } /* * Transmit the given frame to the hardware. * * The frame must already be setup; rate control must already have * been done. * * XXX since the TXQ lock is being held here (and I dislike holding * it for this long when not doing software aggregation), later on * break this function into "setup_normal" and "xmit_normal". The * lock only needs to be held for the ath_tx_handoff call. * * XXX we don't update the leak count here - if we're doing * direct frame dispatch, we need to be able to do it without * decrementing the leak count (eg multicast queue frames.) */ static void ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(bf->bf_node); struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; ATH_TX_LOCK_ASSERT(sc); /* * For now, just enable CLRDMASK. ath_tx_xmit_normal() does * set a completion handler however it doesn't (yet) properly * handle the strict ordering requirements needed for normal, * non-aggregate session frames. * * Once this is implemented, only set CLRDMASK like this for * frames that must go out - eg management/raw frames. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* Setup the descriptor before handoff */ ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* Track per-TID hardware queue depth correctly */ tid->hwq_depth++; /* Assign the completion handler */ bf->bf_comp = ath_tx_normal_comp; /* Hand off to hardware */ ath_tx_handoff(sc, txq, bf); } /* * Do the basic frame setup stuff that's required before the frame * is added to a software queue. * * All frames get mostly the same treatment and it's done once. * Retransmits fiddle with things like the rate control setup, * setting the retransmit bit in the packet; doing relevant DMA/bus * syncing and relinking it (back) into the hardware TX queue. 
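 *
 * For orientation (editorial note): the helper ordering used by
 * ath_tx_xmit_normal() above is deliberate - the rate lookup has to
 * come first because everything after it consumes the chosen rate:
 *
 *	ath_tx_do_ratelookup(sc, bf);	// pick rate(s) first
 *	ath_tx_calc_duration(sc, bf);	// duration depends on rate/pktlen
 *	ath_tx_calc_protection(sc, bf);	// decide RTS/CTS protection
 *	ath_tx_set_rtscts(sc, bf);	// CTS rate/duration from the above
 *	ath_tx_rate_fill_rcflags(sc, bf);
 *	ath_tx_setds(sc, bf);		// only now write the descriptor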
* * Note that this may cause the mbuf to be reallocated, so * m0 may not be valid. */ static int ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; int error, iswep, ismcast, isfrag, ismrr; int keyix, hdrlen, pktlen, try0 = 0; u_int8_t rix = 0, txrate = 0; struct ath_desc *ds; struct ieee80211_frame *wh; u_int subtype, flags; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; HAL_BOOL shortPreamble; struct ath_node *an; u_int pri; /* * To ensure that both sequence numbers and the CCMP PN handling * is "correct", make sure that the relevant TID queue is locked. * Otherwise the CCMP PN and seqno may appear out of order, causing * re-ordered frames to have out of order CCMP PN's, resulting * in many, many frame drops. */ ATH_TX_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isfrag = m0->m_flags & M_FRAG; hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ pktlen = m0->m_pkthdr.len - (hdrlen & 3); /* Handle encryption twiddling if needed */ if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) { ieee80211_free_mbuf(m0); return EIO; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); pktlen += IEEE80211_CRC_LEN; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); /* setup descriptors */ ds = bf->bf_desc; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); /* * NB: the 802.11 layer marks whether or not we should * use short preamble based on the current mode and * negotiated parameters. */ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { shortPreamble = AH_TRUE; sc->sc_stats.ast_tx_shortpre++; } else { shortPreamble = AH_FALSE; } an = ATH_NODE(ni); //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ flags = 0; ismrr = 0; /* default no multi-rate retry*/ pri = M_WME_GETAC(m0); /* honor classification */ /* XXX use txparams instead of fixed values */ /* * Calculate Atheros packet type from IEEE80211 packet header, * setup for rate calculations, and select h/w transmit queue. 
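 *
 * A worked example of the header-pad deduction earlier in this
 * function (editorial): 802.11 headers in the mbuf are padded to a
 * 4-byte boundary, so (hdrlen & 3) is exactly the pad to remove:
 *
 *	int hdrlen = 26;		// QoS data header, unpadded
 *	int pkthdr_len = 28 + 100;	// padded header + 100 byte body
 *	int pktlen = pkthdr_len - (hdrlen & 3);
 *	// pktlen == 126 == 26 + 100; IEEE80211_CRC_LEN is added after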
*/ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) atype = HAL_PKT_TYPE_BEACON; else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) atype = HAL_PKT_TYPE_PROBE_RESP; else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) atype = HAL_PKT_TYPE_ATIM; else atype = HAL_PKT_TYPE_NORMAL; /* XXX */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_CTL: atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_DATA: atype = HAL_PKT_TYPE_NORMAL; /* default */ /* * Data frames: multicast frames go out at a fixed rate, * EAPOL frames use the mgmt frame rate; otherwise consult * the rate control module for the rate to use. */ if (ismcast) { rix = an->an_mcastrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = 1; } else if (m0->m_flags & M_EAPOL) { /* XXX? maybe always use long preamble? */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMAXTRY; /* XXX?too many? */ } else { /* * Do rate lookup on each TX, rather than using * the hard-coded TX information decided here. */ ismrr = 1; bf->bf_state.bfs_doratelookup = 1; } if (cap->cap_wmeParams[pri].wmep_noackPolicy) flags |= HAL_TXDESC_NOACK; break; default: device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ /* XXX free tx dmamap */ ieee80211_free_mbuf(m0); return EIO; } /* * There are two known scenarios where the frame AC doesn't match * what the destination TXQ is. * * + non-QoS frames (eg management?) that the net80211 stack has * assigned a higher AC to, but since it's a non-QoS TID, it's * being thrown into TID 16. TID 16 gets the AC_BE queue. * It's quite possible that management frames should just be * direct dispatched to hardware rather than go via the software * queue; that should be investigated in the future. There are * some specific scenarios where this doesn't make sense, mostly * surrounding ADDBA request/response - hence why that is special * cased. * * + Multicast frames going into the VAP mcast queue. That shows up * as "TXQ 11". * * This driver should eventually support separate TID and TXQ locking, * allowing for arbitrary AC frames to appear on arbitrary software * queues, being queued to the "correct" hardware queue when needed. */ #if 0 if (txq != sc->sc_ac2q[pri]) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n", __func__, txq, txq->axq_qnum, pri, sc->sc_ac2q[pri], sc->sc_ac2q[pri]->axq_qnum); } #endif /* * Calculate miscellaneous flags. 
*/ if (ismcast) { flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ } else if (pktlen > vap->iv_rtsthreshold && (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ sc->sc_stats.ast_tx_rts++; } if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ sc->sc_stats.ast_tx_noack++; #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { DPRINTF(sc, ATH_DEBUG_TDMA, "%s: discard frame, ACK required w/ TDMA\n", __func__); sc->sc_stats.ast_tdma_ack++; /* XXX free tx dmamap */ ieee80211_free_mbuf(m0); return EIO; } #endif /* * If it's a frame to do location reporting on, * communicate it to the HAL. */ if (ieee80211_get_toa_params(m0, NULL)) { device_printf(sc->sc_dev, "%s: setting TX positioning bit\n", __func__); flags |= HAL_TXDESC_POS; /* * Note: The hardware reports timestamps for * each of the RX'ed packets as part of the packet * exchange. So this means things like RTS/CTS * exchanges, as well as the final ACK. * * So, if you send a RTS-protected NULL data frame, * you'll get an RX report for the RTS response, then * an RX report for the NULL frame, and then the TX * completion at the end. * * NOTE: it doesn't work right for CCK frames; * there's no channel info data provided unless * it's OFDM or HT. Will have to dig into it. */ flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); bf->bf_flags |= ATH_BUF_TOA_PROBE; } #if 0 /* * Placeholder: if you want to transmit with the azimuth * timestamp in the end of the payload, here's where you * should set the TXDESC field. */ flags |= HAL_TXDESC_HWTS; #endif /* * Determine if a tx interrupt should be generated for * this descriptor. We take a tx interrupt to reap * descriptors when the h/w hits an EOL condition or * when the descriptor is specifically marked to generate * an interrupt. We periodically mark descriptors in this * way to ensure timely replenishing of the supply needed * for sending frames. Deferring interrupts reduces system * load and potentially allows more concurrent work to be * done but if done too aggressively can cause senders to * back up. * * NB: use >= to deal with sc_txintrperiod changing * dynamically through sysctl. */ if (flags & HAL_TXDESC_INTREQ) { txq->axq_intrcnt = 0; } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { flags |= HAL_TXDESC_INTREQ; txq->axq_intrcnt = 0; } /* This point forward is actual TX bits */ /* * At this point we are committed to sending the frame * and we don't need to look at m_nextpkt; clear it in * case this frame is part of a frag chain. */ m0->m_nextpkt = NULL; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, sc->sc_hwmap[rix].ieeerate, -1); if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; if (iswep) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isfrag) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* Blank the legacy rate array */ bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); /* * ath_buf_set_rate needs at least one rate/try to setup * the rate scenario.
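 *
 * The interrupt mitigation logic above, as a stand-alone editorial
 * sketch (hypothetical names, same semantics):
 *
 *	static int
 *	mark_intreq(int already_intreq, int *intrcnt, int period)
 *	{
 *		if (already_intreq || ++(*intrcnt) >= period) {
 *			*intrcnt = 0;	// ">=" copes with period shrinking
 *			return (1);	// force a TX completion interrupt
 *		}
 *		return (0);		// defer; reap on EOL or a later frame
 *	}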
*/ bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].tries = try0; bf->bf_state.bfs_rc[0].ratecode = txrate; /* Store the decided rate index values away */ bf->bf_state.bfs_pktlen = pktlen; bf->bf_state.bfs_hdrlen = hdrlen; bf->bf_state.bfs_atype = atype; bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); bf->bf_state.bfs_txrate0 = txrate; bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_keyix = keyix; bf->bf_state.bfs_txantenna = sc->sc_txantenna; bf->bf_state.bfs_txflags = flags; bf->bf_state.bfs_shpream = shortPreamble; /* XXX this should be done in ath_tx_setrate() */ bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ bf->bf_state.bfs_ctsrate = 0; /* calculated later */ bf->bf_state.bfs_ctsduration = 0; bf->bf_state.bfs_ismrr = ismrr; return 0; } /* * Queue a frame to the hardware or software queue. * * This can be called by the net80211 code. * * XXX what about locking? Or, push the seqno assign into the * XXX aggregate scheduler so its serialised? * * XXX When sending management frames via ath_raw_xmit(), * should CLRDMASK be set unconditionally? */ int ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { struct ieee80211vap *vap = ni->ni_vap; struct ath_vap *avp = ATH_VAP(vap); int r = 0; u_int pri; int tid; struct ath_txq *txq; int ismcast; const struct ieee80211_frame *wh; int is_ampdu, is_ampdu_tx, is_ampdu_pending; ieee80211_seq seqno; uint8_t type, subtype; int queue_to_head; ATH_TX_LOCK_ASSERT(sc); /* * Determine the target hardware queue. * * For multicast frames, the txq gets overridden appropriately * depending upon the state of PS. * * For any other frame, we do a TID/QoS lookup inside the frame * to see what the TID should be. If it's a non-QoS frame, the * AC and TID are overridden. The TID/TXQ code assumes the * TID is on a predictable hardware TXQ, so we don't support * having a node TID queued to multiple hardware TXQs. * This may change in the future but would require some locking * fudgery. */ pri = ath_tx_getac(sc, m0); tid = ath_tx_gettid(sc, m0); txq = sc->sc_ac2q[pri]; wh = mtod(m0, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* * Enforce how deep the multicast queue can grow. * * XXX duplicated in ath_raw_xmit(). */ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth > sc->sc_txq_mcastq_maxdepth) { sc->sc_stats.ast_tx_mcastq_overflow++; m_freem(m0); return (ENOBUFS); } } /* * Enforce how deep the unicast queue can grow. * * If the node is in power save then we don't want * the software queue to grow too deep, or a node may * end up consuming all of the ath_buf entries. * * For now, only do this for DATA frames. * * We will want to cap how many management/control * frames get punted to the software queue so it doesn't * fill up. But the correct solution isn't yet obvious. * In any case, this check should at least let frames pass * that we are direct-dispatching. * * XXX TODO: duplicate this to the raw xmit path! 
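 *
 * For reference (editorial): the TID/AC lookup at the top of
 * ath_tx_start() follows the usual 802.1D UP-to-AC mapping, with
 * non-QoS traffic pinned to TID 16 - assuming the standard net80211
 * tables, roughly:
 *
 *	TIDs 1, 2 -> WME_AC_BK		TIDs 0, 3 -> WME_AC_BE
 *	TIDs 4, 5 -> WME_AC_VI		TIDs 6, 7 -> WME_AC_VO
 *	TID 16 (IEEE80211_NONQOS_TID) -> WME_AC_BE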
*/ if (type == IEEE80211_FC0_TYPE_DATA && ATH_NODE(ni)->an_is_powersave && ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_psq_maxdepth) { sc->sc_stats.ast_tx_node_psq_overflow++; m_freem(m0); return (ENOBUFS); } /* A-MPDU TX */ is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid); is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid); is_ampdu = is_ampdu_tx | is_ampdu_pending; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n", __func__, tid, pri, is_ampdu); /* Set local packet state, used to queue packets to hardware */ bf->bf_state.bfs_tid = tid; bf->bf_state.bfs_tx_queue = txq->axq_qnum; bf->bf_state.bfs_pri = pri; #if 1 /* * When servicing one or more stations in power-save mode * (or) if there is some mcast data waiting on the mcast * queue (to prevent out of order delivery), multicast frames * must be buffered until after the beacon. * * TODO: we should lock the mcastq before we check the length. */ if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { txq = &avp->av_mcastq; /* * Mark the frame as eventually belonging on the CAB * queue, so the descriptor setup functions will * correctly initialise the descriptor 'qcuId' field. */ bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; } #endif /* Do the generic frame setup */ /* XXX should just bzero the bf_state? */ bf->bf_state.bfs_dobaw = 0; /* A-MPDU TX? Manually set sequence number */ /* * Don't do it whilst pending; the net80211 layer still * assigns them. */ if (is_ampdu_tx) { /* * Always call; this function will * handle making sure that null data frames * don't get a sequence number from the current * TID and thus mess with the BAW. */ seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0); /* * Don't add QoS NULL frames to the BAW. */ if (IEEE80211_QOS_HAS_SEQ(wh) && subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) { bf->bf_state.bfs_dobaw = 1; } } /* * If needed, the sequence number has been assigned. * Squirrel it away somewhere easy to get to. */ bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; /* Is ampdu pending? fetch the seqno and print it out */ if (is_ampdu_pending) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid %d: ampdu pending, seqno %d\n", __func__, tid, M_SEQNO_GET(m0)); /* This also sets up the DMA map */ r = ath_tx_normal_setup(sc, ni, bf, m0, txq); if (r != 0) goto done; /* At this point m0 could have changed! */ m0 = bf->bf_m; #if 1 /* * If it's a multicast frame, do a direct-dispatch to the * destination hardware queue. Don't bother software * queuing it. */ /* * If it's a BAR frame, do a direct dispatch to the * destination hardware queue. Don't bother software * queuing it, as the TID will now be paused. * Sending a BAR frame can occur from the net80211 txa timer * (ie, retries) or from the ath txtask (completion call.) * It queues directly to hardware because the TID is paused * at this point (and won't be unpaused until the BAR has * either been TXed successfully or max retries has been * reached.) */ /* * Until things are better debugged - if this node is asleep * and we're sending it a non-BAR frame, direct dispatch it. * Why? Because we need to figure out what's actually being * sent - eg, during reassociation/reauthentication after * the node (last) disappeared whilst asleep, the driver should * have unpaused/unsleep'ed the node. So until that is * sorted out, use this workaround.
*/ if (txq == &avp->av_mcastq) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: mcastq: TX'ing\n", __func__, bf); bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, txq, bf); } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, &queue_to_head)) { ath_tx_swq(sc, ni, txq, queue_to_head, bf); } else { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, txq, bf); } #else /* * For now, since there's no software queue, * direct-dispatch to the hardware. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); ath_tx_xmit_normal(sc, txq, bf); #endif done: return 0; } static int ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; int error, ismcast, ismrr; int keyix, hdrlen, pktlen, try0, txantenna; u_int8_t rix, txrate; struct ieee80211_frame *wh; u_int flags; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; struct ath_desc *ds; u_int pri; int o_tid = -1; int do_override; uint8_t type, subtype; int queue_to_head; struct ath_node *an = ATH_NODE(ni); ATH_TX_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ /* XXX honor IEEE80211_BPF_DATAPAD */ pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; ATH_KTR(sc, ATH_KTR_TX, 2, "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n", __func__, ismcast); pri = params->ibp_pri & 3; /* Override pri if the frame isn't a QoS one */ if (! IEEE80211_QOS_HAS_SEQ(wh)) pri = ath_tx_getac(sc, m0); /* XXX If it's an ADDBA, override the correct queue */ do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid); /* Map ADDBA to the correct priority */ if (do_override) { #if 0 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: overriding tid %d pri %d -> %d\n", __func__, o_tid, pri, TID_TO_WME_AC(o_tid)); #endif pri = TID_TO_WME_AC(o_tid); } /* Handle encryption twiddling if needed */ if (! ath_tx_tag_crypto(sc, ni, m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, &hdrlen, &pktlen, &keyix)) { ieee80211_free_mbuf(m0); return EIO; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); /* Do the generic frame setup */ /* XXX should just bzero the bf_state? */ bf->bf_state.bfs_dobaw = 0; error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); KASSERT((ni != NULL), ("%s: ni=NULL!", __func__)); bf->bf_node = ni; /* NB: held reference */ /* Always enable CLRDMASK for raw frames for now.. */ flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ flags |= HAL_TXDESC_INTREQ; /* force interrupt */ if (params->ibp_flags & IEEE80211_BPF_RTS) flags |= HAL_TXDESC_RTSENA; else if (params->ibp_flags & IEEE80211_BPF_CTS) { /* XXX assume 11g/11n protection? */ bf->bf_state.bfs_doprot = 1; flags |= HAL_TXDESC_CTSENA; } /* XXX leave ismcast to injector? 
*/ if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) flags |= HAL_TXDESC_NOACK; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); /* Fetch first rate information */ rix = ath_tx_findrix(sc, params->ibp_rate0); try0 = params->ibp_try0; /* * Override EAPOL rate as appropriate. */ if (m0->m_flags & M_EAPOL) { /* XXX? maybe always use long preamble? */ rix = an->an_mgmtrix; try0 = ATH_TXMAXTRY; /* XXX?too many? */ } /* * If it's a frame to do location reporting on, * communicate it to the HAL. */ if (ieee80211_get_toa_params(m0, NULL)) { device_printf(sc->sc_dev, "%s: setting TX positioning bit\n", __func__); flags |= HAL_TXDESC_POS; flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA); bf->bf_flags |= ATH_BUF_TOA_PROBE; } txrate = rt->info[rix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) txrate |= rt->info[rix].shortPreamble; sc->sc_txrix = rix; ismrr = (params->ibp_try1 != 0); txantenna = params->ibp_pri >> 2; if (txantenna == 0) /* XXX? */ txantenna = sc->sc_txantenna; /* * Since ctsrate is fixed, store it away for later * use when the descriptor fields are being set. */ if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; /* * NB: we mark all packets as type PSPOLL so the h/w won't * set the sequence number, duration, etc. */ atype = HAL_PKT_TYPE_PSPOLL; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, sc->sc_hwmap[rix].ieeerate, -1); if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (m0->m_flags & M_FRAG) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, ieee80211_get_node_txpower(ni)); sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Formulate first tx descriptor with tx controls. */ ds = bf->bf_desc; /* XXX check return value? */ /* Store the decided rate index values away */ bf->bf_state.bfs_pktlen = pktlen; bf->bf_state.bfs_hdrlen = hdrlen; bf->bf_state.bfs_atype = atype; bf->bf_state.bfs_txpower = MIN(params->ibp_power, ieee80211_get_node_txpower(ni)); bf->bf_state.bfs_txrate0 = txrate; bf->bf_state.bfs_try0 = try0; bf->bf_state.bfs_keyix = keyix; bf->bf_state.bfs_txantenna = txantenna; bf->bf_state.bfs_txflags = flags; bf->bf_state.bfs_shpream = !! 
(params->ibp_flags & IEEE80211_BPF_SHORTPRE); /* Set local packet state, used to queue packets to hardware */ bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; bf->bf_state.bfs_pri = pri; /* XXX this should be done in ath_tx_setrate() */ bf->bf_state.bfs_ctsrate = 0; bf->bf_state.bfs_ctsduration = 0; bf->bf_state.bfs_ismrr = ismrr; /* Blank the legacy rate array */ bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); bf->bf_state.bfs_rc[0].rix = rix; bf->bf_state.bfs_rc[0].tries = try0; bf->bf_state.bfs_rc[0].ratecode = txrate; if (ismrr) { int rix; rix = ath_tx_findrix(sc, params->ibp_rate1); bf->bf_state.bfs_rc[1].rix = rix; bf->bf_state.bfs_rc[1].tries = params->ibp_try1; rix = ath_tx_findrix(sc, params->ibp_rate2); bf->bf_state.bfs_rc[2].rix = rix; bf->bf_state.bfs_rc[2].tries = params->ibp_try2; rix = ath_tx_findrix(sc, params->ibp_rate3); bf->bf_state.bfs_rc[3].rix = rix; bf->bf_state.bfs_rc[3].tries = params->ibp_try3; } /* * All the required rate control decisions have been made; * fill in the rc flags. */ ath_tx_rate_fill_rcflags(sc, bf); /* NB: no buffered multicast in power save support */ /* * If we're overriding the ADDBA destination, dump directly * into the hardware queue, right after any pending * frames to that node are. */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n", __func__, do_override); #if 1 /* * Put addba frames in the right place in the right TID/HWQ. */ if (do_override) { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * XXX if it's addba frames, should we be leaking * them out via the frame leak method? * XXX for now let's not risk it; but we may wish * to investigate this later. */ ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, &queue_to_head)) { /* Queue to software queue */ ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); } else { bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); } #else /* Direct-dispatch to the hardware */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); #endif return 0; } /* * Send a raw frame. * * This can be called by net80211. */ int ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_buf *bf; struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *); int error = 0; ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt > 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: sc_inreset_cnt > 0; bailing\n", __func__); error = EIO; ATH_PCU_UNLOCK(sc); goto badbad; } sc->sc_txstart_cnt++; ATH_PCU_UNLOCK(sc); /* Wake the hardware up already */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_TX_LOCK(sc); if (!sc->sc_running || sc->sc_invalid) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d", __func__, sc->sc_running, sc->sc_invalid); m_freem(m); error = ENETDOWN; goto bad; } /* * Enforce how deep the multicast queue can grow. * * XXX duplicated in ath_tx_start().
*/ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth > sc->sc_txq_mcastq_maxdepth) { sc->sc_stats.ast_tx_mcastq_overflow++; error = ENOBUFS; } if (error != 0) { m_freem(m); goto bad; } } /* * Grab a TX buffer and associated resources. */ bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); if (bf == NULL) { sc->sc_stats.ast_tx_nobuf++; m_freem(m); error = ENOBUFS; goto bad; } ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n", m, params, bf); if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ath_tx_start(sc, ni, bf, m)) { error = EIO; /* XXX */ goto bad2; } } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (ath_tx_raw_start(sc, ni, bf, m, params)) { error = EIO; /* XXX */ goto bad2; } } sc->sc_wd_timer = 5; sc->sc_stats.ast_tx_raw++; /* * Update the TIM - if there's anything queued to the * software queue and power save is enabled, we should * set the TIM. */ ath_tx_update_tim(sc, ni, 1); ATH_TX_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Put the hardware back to sleep if required */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return 0; bad2: ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, " "bf=%p", m, params, bf); ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, bf); ATH_TXBUF_UNLOCK(sc); bad: ATH_TX_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Put the hardware back to sleep if required */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); badbad: ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p", m, params); sc->sc_stats.ast_tx_raw_fail++; return error; } /* Some helper functions */ /* * ADDBA (and potentially others) need to be placed in the same * hardware queue as the TID/node it's relating to. This is so * it goes out after any pending non-aggregate frames to the * same node/TID. * * If this isn't done, the ADDBA can go out before the frames * queued in hardware. Even though these frames have a sequence * number -earlier- than the ADDBA can be transmitted (but * no frames whose sequence numbers are after the ADDBA should * be!) they'll arrive after the ADDBA - and the receiving end * will simply drop them as being out of the BAW. * * The frames can't be appended to the TID software queue - it'll * never be sent out. So these frames have to be directly * dispatched to the hardware, rather than queued in software. * So if this function returns true, the TXQ has to be * overridden and it has to be directly dispatched. * * It's a dirty hack, but someone's gotta do it. */ /* * XXX doesn't belong here! */ static int ieee80211_is_action(struct ieee80211_frame *wh) { /* Type: Management frame? */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT) return 0; /* Subtype: Action frame? */ if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != IEEE80211_FC0_SUBTYPE_ACTION) return 0; return 1; } #define MS(_v, _f) (((_v) & _f) >> _f##_S) /* * Return an alternate TID for ADDBA request frames. * * Yes, this likely should be done in the net80211 layer. */ static int ath_tx_action_frame_override_queue(struct ath_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, int *tid) { struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *); struct ieee80211_action_ba_addbarequest *ia; uint8_t *frm; uint16_t baparamset; /* Not action frame? Bail */ if (! 
ieee80211_is_action(wh)) return 0; /* XXX Not needed for frames we send? */ #if 0 /* Correct length? */ if (! ieee80211_parse_action(ni, m)) return 0; #endif /* Extract out action frame */ frm = (u_int8_t *)&wh[1]; ia = (struct ieee80211_action_ba_addbarequest *) frm; /* Not ADDBA? Bail */ if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) return 0; if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) return 0; /* Extract TID, return it */ baparamset = le16toh(ia->rq_baparamset); *tid = (int) MS(baparamset, IEEE80211_BAPS_TID); return 1; } #undef MS /* Per-node software queue operations */ /* * Add the current packet to the given BAW. * It is assumed that the current packet * * + fits inside the BAW; * + already has had a sequence number allocated. * * Since the BAW status may be modified by both the ath task and * the net80211/ifnet contexts, the TID must be locked. */ void ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); if (bf->bf_state.bfs_isretried) return; tap = ath_tx_get_tx_tid(an, tid->tid); if (! bf->bf_state.bfs_dobaw) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: dobaw=0, seqno=%d, window %d:%d\n", __func__, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd); } if (bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: re-added? tid=%d, seqno %d; window %d:%d; " "baw head=%d tail=%d\n", __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail); /* * Verify that the given sequence number is not outside of the * BAW. Complain loudly if that's the case. */ if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, SEQNO(bf->bf_state.bfs_seqno))) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; " "baw head=%d tail=%d\n", __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, tid->baw_head, tid->baw_tail); } /* * ni->ni_txseqs[] is the currently allocated seqno. * the txa state contains the current baw start. */ index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d " "baw head=%d tail=%d\n", __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, tid->baw_tail); #if 0 assert(tid->tx_buf[cindex] == NULL); #endif if (tid->tx_buf[cindex] != NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: ba packet dup (index=%d, cindex=%d, " "head=%d, tail=%d)\n", __func__, index, cindex, tid->baw_head, tid->baw_tail); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n", __func__, tid->tx_buf[cindex], SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), bf, SEQNO(bf->bf_state.bfs_seqno) ); } tid->tx_buf[cindex] = bf; if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) { tid->baw_tail = cindex; INCR(tid->baw_tail, ATH_TID_MAX_BUFS); } } /* * Flip the BAW buffer entry over from the existing one to the new one. * * When software retransmitting a (sub-)frame, it is entirely possible that * the frame ath_buf is marked as BUSY and can't be immediately reused. * In that instance the buffer is cloned and the new buffer is used for * retransmit. We thus need to update the ath_buf slot in the BAW buf * tracking array to maintain consistency. 
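 *
 * A worked example of the BAW slot arithmetic shared by these
 * functions (editorial; the 64-slot ring size below is hypothetical,
 * the code masks with ATH_TID_MAX_BUFS - 1):
 *
 *	// ATH_BA_INDEX(start, seq) == ((seq - start) & 4095)
 *	// txa_start = 4090, seqno = 3, baw_head = 60:
 *	int index  = (3 - 4090) & 4095;	// == 9, offset into the window
 *	int cindex = (60 + index) & 63;	// == 5, slot in tx_buf[]
 *
 * ath_tx_update_baw() later NULLs the slot and slides txa_start and
 * baw_head forward together past any already-completed slots.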
*/ static void ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; int seqno = SEQNO(old_bf->bf_state.bfs_seqno); ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); index = ATH_BA_INDEX(tap->txa_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); /* * Just warn for now; if it happens then we should find out * about it. It's highly likely the aggregation session will * soon hang. */ if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: retransmitted buffer" " has mismatching seqnos, BA session may hang.\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: old seqno=%d, new_seqno=%d\n", __func__, old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); } if (tid->tx_buf[cindex] != old_bf) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: ath_buf pointer incorrect; BA session may hang.\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf); } tid->tx_buf[cindex] = new_bf; } /* * seq_start - left edge of BAW * seq_next - current/next sequence number to allocate * * Since the BAW status may be modified by both the ath task and * the net80211/ifnet contexts, the TID must be locked. */ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, const struct ath_buf *bf) { int index, cindex; struct ieee80211_tx_ampdu *tap; int seqno = SEQNO(bf->bf_state.bfs_seqno); ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); index = ATH_BA_INDEX(tap->txa_start, seqno); cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, " "baw head=%d, tail=%d\n", __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, cindex, tid->baw_head, tid->baw_tail); /* * If this occurs then we have a big problem - something else * has slid tap->txa_start along without updating the BAW * tracking start/end pointers. Thus the TX BAW state is now * completely busted. * * But for now, since I haven't yet fixed TDMA and buffer cloning, * it's quite possible that a cloned buffer is making its way * here and causing it to fire off. Disable TDMA for now. */ if (tid->tx_buf[cindex] != bf) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno), tid->tx_buf[cindex], (tid->tx_buf[cindex] != NULL) ? SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); } tid->tx_buf[cindex] = NULL; while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { INCR(tap->txa_start, IEEE80211_SEQ_RANGE); INCR(tid->baw_head, ATH_TID_MAX_BUFS); } DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: tid=%d: baw is now %d:%d, baw head=%d\n", __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); } static void ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ieee80211_frame *wh; ATH_TX_LOCK_ASSERT(sc); if (tid->an->an_leak_count > 0) { wh = mtod(bf->bf_m, struct ieee80211_frame *); /* * Update MORE based on the software/net80211 queue states.
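 *
 * I.e. (editorial note), the bit is set exactly when the node still
 * has frames pending somewhere:
 *
 *	more = (an_stack_psq > 0) || (an_swq_depth > 0);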
*/ if ((tid->an->an_stack_psq > 0) || (tid->an->an_swq_depth > 0)) wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; else wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->an->an_leak_count, tid->an->an_stack_psq, tid->an->an_swq_depth, !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); /* * Re-sync the underlying buffer. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); tid->an->an_leak_count--; } } static int ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); if (tid->an->an_leak_count > 0) { return (1); } if (tid->paused) return (0); return (1); } /* * Mark the current node/TID as ready to TX. * * This is done to make it easy for the software scheduler to * find which nodes have data to send. * * The TXQ lock must be held. */ void ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid) { struct ath_txq *txq = sc->sc_ac2q[tid->ac]; ATH_TX_LOCK_ASSERT(sc); /* * If we are leaking out a frame to this destination * for PS-POLL, ensure that we allow scheduling to * occur. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) return; /* paused, can't schedule yet */ if (tid->sched) return; /* already scheduled */ tid->sched = 1; #if 0 /* * If this is a sleeping node we're leaking to, give * it a higher priority. This is so bad for QoS it hurts. */ if (tid->an->an_leak_count) { TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); } else { TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); } #endif /* * We can't do the above - it'll confuse the TXQ software * scheduler which will keep checking the _head_ TID * in the list to see if it has traffic. If we queue * a TID to the head of the list and it doesn't transmit, * we'll check it again. * * So, get the rest of this frame-leaking support working * and reliable first and _then_ optimise it so they're * pushed out in front of any other pending software * queued nodes. */ TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); } /* * Mark the current node as no longer needing to be polled for * TX packets. * * The TXQ lock must be held. */ static void ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid) { struct ath_txq *txq = sc->sc_ac2q[tid->ac]; ATH_TX_LOCK_ASSERT(sc); if (tid->sched == 0) return; tid->sched = 0; TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); } /* * Assign a sequence number manually to the given frame. * * This should only be called for A-MPDU TX frames. */ static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { struct ieee80211_frame *wh; int tid, pri; ieee80211_seq seqno; uint8_t subtype; /* TID lookup */ wh = mtod(m0, struct ieee80211_frame *); pri = M_WME_GETAC(m0); /* honor classification */ tid = WME_AC_TO_TID(pri); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n", __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); /* XXX Is it a control frame? Ignore */ /* Does the packet require a sequence number? */ if (! IEEE80211_QOS_HAS_SEQ(wh)) return -1; ATH_TX_LOCK_ASSERT(sc); /* * Is it a QOS NULL Data frame? Give it a sequence number from * the default TID (IEEE80211_NONQOS_TID.) * * The RX path of everything I've looked at doesn't include the NULL * data frame sequence number in the aggregation state updates, so * assigning it a sequence number there will cause a BAW hole on the * RX side.
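 *
 * The assignment just below hands out the current counter value and
 * then advances it modulo the 12-bit sequence space; a worked example
 * (editorial):
 *
 *	// INCR(v, IEEE80211_SEQ_RANGE) is effectively v = (v + 1) % 4096
 *	seqno = ni->ni_txseqs[tid];	// e.g. 4095 goes to this frame
 *	// after INCR: ni->ni_txseqs[tid] wraps around to 0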
*/ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) { /* XXX no locking for this TID? This is a bit of a problem. */ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); } else { /* Manually assign sequence number */ seqno = ni->ni_txseqs[tid]; INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); } *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); M_SEQNO_SET(m0, seqno); /* Return so caller can do something with it if needed */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno); return seqno; } /* * Attempt to direct dispatch an aggregate frame to hardware. * If the frame is out of BAW, queue. * Otherwise, schedule it as a single frame. */ static void ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_txq *txq, struct ath_buf *bf) { struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); tap = ath_tx_get_tx_tid(an, tid->tid); /* paused? queue */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { ATH_TID_INSERT_HEAD(tid, bf, bf_list); /* XXX don't sched - we're paused! */ return; } /* outside baw? queue */ if (bf->bf_state.bfs_dobaw && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, SEQNO(bf->bf_state.bfs_seqno)))) { ATH_TID_INSERT_HEAD(tid, bf, bf_list); ath_tx_tid_sched(sc, tid); return; } /* * This is a temporary check and should be removed once * all the relevant code paths have been fixed. * * During aggregate retries, it's possible that the head * frame will fail (which has the bfs_aggr and bfs_nframes * fields set for said aggregate) and will be retried as * a single frame. In this instance, the values should * be reset or the completion code will get upset with you. */ if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__, bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; } /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* Direct dispatch to hardware */ ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* Statistics */ sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; /* Track per-TID hardware queue depth correctly */ tid->hwq_depth++; /* Add to BAW */ if (bf->bf_state.bfs_dobaw) { ath_tx_addto_baw(sc, an, tid, bf); bf->bf_state.bfs_addedbaw = 1; } /* Set completion handler, multi-frame aggregate or not */ bf->bf_comp = ath_tx_aggr_comp; /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); /* Hand off to hardware */ ath_tx_handoff(sc, txq, bf); } /* * Attempt to send the packet. * If the queue isn't busy, direct-dispatch. * If the queue is busy enough, queue the given packet on the * relevant software queue. 
*/ void ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq, int queue_to_head, struct ath_buf *bf) { struct ath_node *an = ATH_NODE(ni); struct ieee80211_frame *wh; struct ath_tid *atid; int pri, tid; struct mbuf *m0 = bf->bf_m; ATH_TX_LOCK_ASSERT(sc); /* Fetch the TID - non-QoS frames get assigned to TID 16 */ wh = mtod(m0, struct ieee80211_frame *); pri = ath_tx_getac(sc, m0); tid = ath_tx_gettid(sc, m0); atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n", __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh)); /* Set local packet state, used to queue packets to hardware */ /* XXX potentially duplicate info, re-check */ bf->bf_state.bfs_tid = tid; bf->bf_state.bfs_tx_queue = txq->axq_qnum; bf->bf_state.bfs_pri = pri; /* * If the hardware queue isn't busy, direct-dispatch it. * If the hardware queue is busy, software queue it. * If the TID is paused or the traffic is outside the BAW, software * queue it. * * If the node is in power-save and we're leaking a frame, * leak a single frame. */ if (! ath_tx_tid_can_tx_or_sched(sc, atid)) { /* TID is paused, queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__); /* * If the caller requested that it be sent at a high * priority, queue it at the head of the list. */ if (queue_to_head) ATH_TID_INSERT_HEAD(atid, bf, bf_list); else ATH_TID_INSERT_TAIL(atid, bf, bf_list); } else if (ath_tx_ampdu_pending(sc, an, tid)) { /* AMPDU pending; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__); ATH_TID_INSERT_TAIL(atid, bf, bf_list); /* XXX sched? */ } else if (ath_tx_ampdu_running(sc, an, tid)) { - /* AMPDU running, attempt direct dispatch if possible */ + /* + * AMPDU running; queue the single frame if the hardware queue + * isn't busy. + * + * If the hardware queue is busy sending an aggregate frame, + * just hold off so we can queue more aggregate frames. + * + * Otherwise we may end up with single frames leaking through + * because we are dispatching them too quickly. + * + * TODO: maybe we should treat this as two policies - minimise + * latency, or maximise throughput. Then for BE/BK we can + * maximise throughput, and VO/VI (if AMPDU is enabled!) + * minimise latency. + */ /* * Always queue the frame to the tail of the list. */ ATH_TID_INSERT_TAIL(atid, bf, bf_list); /* * If the hardware queue isn't busy, direct dispatch - * the head frame in the list. Don't schedule the - * TID - let it build some more frames first? + * the head frame in the list. * - * When running A-MPDU, always just check the hardware - * queue depth against the aggregate frame limit. - * We don't want to burst a large number of single frames - * out to the hardware; we want to aggressively hold back. + * Note: if we're, say, configured to do ADDBA but not A-MPDU + * then maybe we want to still queue two non-aggregate frames + * to the hardware. Again with the per-TID policy + * configuration.) * * Otherwise, schedule the TID. */ /* XXX TXQ locking */ - if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) { + if (txq->axq_depth + txq->fifo.axq_depth == 0) { + bf = ATH_TID_FIRST(atid); ATH_TID_REMOVE(atid, bf, bf_list); /* * Ensure it's definitely treated as a non-AMPDU * frame - this information may have been left * over from a previous attempt.
*/ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; /* Queue to the hardware */ ath_tx_xmit_aggr(sc, an, txq, bf); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_aggr\n", __func__); } else { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ampdu; swq'ing\n", __func__); ath_tx_tid_sched(sc, atid); } /* * If we're not doing A-MPDU, be prepared to direct dispatch * up to both limits if possible. This particular corner * case may end up with packet starvation between aggregate * traffic and non-aggregate traffic: we want to ensure * that non-aggregate stations get a few frames queued to the * hardware before the aggregate station(s) get their chance. * * So if you only ever see a couple of frames direct dispatched * to the hardware from a non-AMPDU client, check both here * and in the software queue dispatcher to ensure that those * non-AMPDU stations get a fair chance to transmit. */ /* XXX TXQ locking */ } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { /* AMPDU not running, attempt direct dispatch */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__); /* See if clrdmask needs to be set */ ath_tx_update_clrdmask(sc, atid, bf); /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, atid, bf); /* * Dispatch the frame. */ ath_tx_xmit_normal(sc, txq, bf); } else { /* Busy; queue */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__); ATH_TID_INSERT_TAIL(atid, bf, bf_list); ath_tx_tid_sched(sc, atid); } } /* * Only set the clrdmask bit if none of the TIDs are currently * filtered. * * XXX TODO: go through all the callers and check to see * which are being called in the context of looping over all * TIDs (eg, if all tids are being paused, resumed, etc.) * That'll avoid O(n^2) complexity here. */ static void ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an) { int i; ATH_TX_LOCK_ASSERT(sc); for (i = 0; i < IEEE80211_TID_SIZE; i++) { if (an->an_tid[i].isfiltered == 1) return; } an->clrdmask = 1; } /* * Configure the per-TID node state. * * This likely belongs in if_ath_node.c but I can't think of anywhere * else to put it just yet. * * This sets up the SLISTs and the mutex as appropriate. */ void ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an) { int i, j; struct ath_tid *atid; for (i = 0; i < IEEE80211_TID_SIZE; i++) { atid = &an->an_tid[i]; /* XXX now with this bzero(), is the field zeroing needed? */ bzero(atid, sizeof(*atid)); TAILQ_INIT(&atid->tid_q); TAILQ_INIT(&atid->filtq.tid_q); atid->tid = i; atid->an = an; for (j = 0; j < ATH_TID_MAX_BUFS; j++) atid->tx_buf[j] = NULL; atid->baw_head = atid->baw_tail = 0; atid->paused = 0; atid->sched = 0; atid->hwq_depth = 0; atid->cleanup_inprogress = 0; if (i == IEEE80211_NONQOS_TID) atid->ac = ATH_NONQOS_TID_AC; else atid->ac = TID_TO_WME_AC(i); } an->clrdmask = 1; /* Always start by setting this bit */ } /* * Pause the current TID. This stops packets from being transmitted * on it. * * Since this is also called from upper layers as well as the driver, * it will get the TID lock. */ static void ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); tid->paused++; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->paused); } /* * Unpause the current TID, and schedule it if needed.
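 *
 * Note (editorial): tid->paused is a nesting counter rather than a
 * flag, so pause and resume calls must balance:
 *
 *	ath_tx_tid_pause(sc, tid);	// paused == 1
 *	ath_tx_tid_pause(sc, tid);	// paused == 2 (e.g. BAR + filter)
 *	ath_tx_tid_resume(sc, tid);	// paused == 1, still blocked
 *	ath_tx_tid_resume(sc, tid);	// paused == 0, TID scheduled again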
*/ static void ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); /* * There are some odd places where ath_tx_tid_resume() is called * when it shouldn't be; this works around that particular issue * until it's actually resolved. */ if (tid->paused == 0) { device_printf(sc->sc_dev, "%s: [%6D]: tid=%d, paused=0?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); } else { tid->paused--; } DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, unpaused = %d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->paused); if (tid->paused) return; /* * Override the clrdmask configuration for the next frame * from this TID, just to get the ball rolling. */ ath_tx_set_clrdmask(sc, tid->an); if (tid->axq_depth == 0) return; /* XXX isfiltered shouldn't ever be 0 at this point */ if (tid->isfiltered == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n", __func__); return; } ath_tx_tid_sched(sc, tid); /* * Queue the software TX scheduler. */ ath_tx_swq_kick(sc); } /* * Add the given ath_buf to the TID filtered frame list. * This requires the TID be filtered. */ static void ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); if (!tid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf); /* Set the retry bit and bump the retry counter */ ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swfiltered++; ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list); } /* * Handle a completed filtered frame from the given TID. * This just enables/pauses the filtered frame state if required * and appends the filtered frame to the filtered queue. */ static void ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); if (! tid->isfiltered) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n", __func__, tid->tid); tid->isfiltered = 1; ath_tx_tid_pause(sc, tid); } /* Add the frame to the filter queue */ ath_tx_tid_filt_addbuf(sc, tid, bf); } /* * Complete the filtered frame TX completion. * * If there are no more frames in the hardware queue, unpause/unfilter * the TID if applicable. Otherwise we will wait for a node PS transition * to unfilter. */ static void ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid) { struct ath_buf *bf; int do_resume = 0; ATH_TX_LOCK_ASSERT(sc); if (tid->hwq_depth != 0) return; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n", __func__, tid->tid); if (tid->isfiltered == 1) { tid->isfiltered = 0; do_resume = 1; } /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */ ath_tx_set_clrdmask(sc, tid->an); /* XXX this is really quite inefficient */ while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) { ATH_TID_FILT_REMOVE(tid, bf, bf_list); ATH_TID_INSERT_HEAD(tid, bf, bf_list); } /* And only resume if we had paused before */ if (do_resume) ath_tx_tid_resume(sc, tid); } /* * Called when a single (aggregate or otherwise) frame is completed. * * Returns 0 if the buffer could be added to the filtered list * (cloned or otherwise), 1 if the buffer couldn't be added to the * filtered list (failed clone; expired retry) and the caller should * free it and handle it like a failure (eg by sending a BAR.) * * Since the buffer may be cloned, bf must not be touched after this * if the return value is 0.
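 *
 * The per-buffer decision implemented below, summarised (editorial):
 *
 *	if (bf->bf_state.bfs_retries > SWMAX_RETRIES)
 *		return 1;		// give up; caller frees, may BAR
 *	else if (bf->bf_flags & ATH_BUF_BUSY)
 *		nbf = clone;		// hardware still owns the original
 *	else
 *		nbf = bf;		// safe to reuse the buffer as-is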
*/ static int ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf) { struct ath_buf *nbf; int retval; ATH_TX_LOCK_ASSERT(sc); /* * Don't allow a filtered frame to live forever. */ if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p, seqno=%d, exceeded retries\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno)); retval = 1; /* error */ goto finish; } /* * A busy buffer can't be added to the retry list. * It needs to be cloned. */ if (bf->bf_flags & ATH_BUF_BUSY) { nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: busy buffer clone: %p -> %p\n", __func__, bf, nbf); } else { nbf = bf; } if (nbf == NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: busy buffer couldn't be cloned (%p)!\n", __func__, bf); retval = 1; /* error */ } else { ath_tx_tid_filt_comp_buf(sc, tid, nbf); retval = 0; /* ok */ } finish: ath_tx_tid_filt_comp_complete(sc, tid); return (retval); } static void ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid, struct ath_buf *bf_first, ath_bufhead *bf_q) { struct ath_buf *bf, *bf_next, *nbf; ATH_TX_LOCK_ASSERT(sc); bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ /* * Don't allow a filtered frame to live forever. */ if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n", __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); TAILQ_INSERT_TAIL(bf_q, bf, bf_list); goto next; } if (bf->bf_flags & ATH_BUF_BUSY) { nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); } else { nbf = bf; } /* * If the buffer couldn't be cloned, add it to bf_q; * the caller will free the buffer(s) as required. */ if (nbf == NULL) { DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n", __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); TAILQ_INSERT_TAIL(bf_q, bf, bf_list); } else { ath_tx_tid_filt_comp_buf(sc, tid, nbf); } next: bf = bf_next; } ath_tx_tid_filt_comp_complete(sc, tid); } /* * Suspend the queue because we need to TX a BAR. */ static void ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n", __func__, tid->tid, tid->bar_wait, tid->bar_tx); /* We shouldn't be called when bar_tx is 1 */ if (tid->bar_tx) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: bar_tx is 1?!\n", __func__); } /* If we've already been called, just be patient. */ if (tid->bar_wait) return; /* Wait! */ tid->bar_wait = 1; /* Only one pause, no matter how many frames fail */ ath_tx_tid_pause(sc, tid); } /* * We've finished with BAR handling - either we succeeded or * failed. Either way, unsuspend TX. 
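 *
 * The BAR handshake as a state sketch (editorial):
 *
 *	bar_suspend:	bar_wait = 1; TID paused once, no matter how
 *			many frames fail
 *	tx_ready:	bar_wait == 1 and hwq_depth == 0
 *	bar_tx:		bar_tx = 1; ieee80211_send_bar() issued
 *	unsuspend:	bar_tx = bar_wait = 0; TID resumed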
*/ static void ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, called\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); if (tid->bar_tx == 0 || tid->bar_wait == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->bar_tx, tid->bar_wait); } tid->bar_tx = tid->bar_wait = 0; ath_tx_tid_resume(sc, tid); } /* * Return whether we're ready to TX a BAR frame. * * Requires the TID lock be held. */ static int ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid) { ATH_TX_LOCK_ASSERT(sc); if (tid->bar_wait == 0 || tid->hwq_depth > 0) return (0); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar ready\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); return (1); } /* * Check whether the current TID is ready to have a BAR * TXed and if so, do the TX. * * Since the TID/TXQ lock can't be held during a call to * ieee80211_send_bar(), we have to do the dirty thing of unlocking it, * sending the BAR and locking it again. * * Eventually, the code to send the BAR should be broken out * from this routine so the lock doesn't have to be reacquired * just to be immediately dropped by the caller. */ static void ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid) { struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, called\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); tap = ath_tx_get_tx_tid(tid->an, tid->tid); /* * This is an error condition! */ if (tid->bar_wait == 0 || tid->bar_tx == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->bar_tx, tid->bar_wait); return; } /* Don't do anything if we still have pending frames */ if (tid->hwq_depth > 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, hwq_depth=%d, waiting\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tid->hwq_depth); return; } /* We're now about to TX */ tid->bar_tx = 1; /* * Override the clrdmask configuration for the next frame, * just to get the ball rolling. */ ath_tx_set_clrdmask(sc, tid->an); /* * Calculate new BAW left edge, now that all frames have either * succeeded or failed. * * XXX verify this is _actually_ the valid value to begin at! */ DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, new BAW left edge=%d\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid, tap->txa_start); /* Try sending the BAR frame */ /* We can't hold the lock here! */ ATH_TX_UNLOCK(sc); if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { /* Success? Now we wait for notification that it's done */ ATH_TX_LOCK(sc); return; } /* Failure? For now, warn loudly and continue */ ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: TID=%d, failed to TX BAR, continue!\n", __func__, tid->an->an_node.ni_macaddr, ":", tid->tid); ath_tx_tid_bar_unsuspend(sc, tid); } static void ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); /* * If the current TID is running AMPDU, update * the BAW. */ if (ath_tx_ampdu_running(sc, an, tid->tid) && bf->bf_state.bfs_dobaw) { /* * Only remove the frame from the BAW if it's * been transmitted at least once; this means * the frame was in the BAW to begin with. 
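 *
 * (That is, a frame with bfs_retries == 0 was never handed to the
 * hardware, so it was never added to the BAW; "removing" it would
 * corrupt the window tracking.)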
*/ if (bf->bf_state.bfs_retries > 0) { ath_tx_update_baw(sc, an, tid, bf); bf->bf_state.bfs_dobaw = 0; } #if 0 /* * This has become a non-fatal error now */ if (! bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); #endif } /* Strip it out of an aggregate list if it was in one */ bf->bf_next = NULL; /* Insert on the free queue to be freed by the caller */ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); } static void ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an, const char *pfx, struct ath_tid *tid, struct ath_buf *bf) { struct ieee80211_node *ni = &an->an_node; struct ath_txq *txq; struct ieee80211_tx_ampdu *tap; txq = sc->sc_ac2q[tid->ac]; tap = ath_tx_get_tx_tid(an, tid->tid); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, " "seqno=%d, retry=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, bf->bf_state.bfs_addedbaw, bf->bf_state.bfs_dobaw, SEQNO(bf->bf_state.bfs_seqno), bf->bf_state.bfs_retries); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, txq->axq_qnum, txq->axq_depth, txq->axq_aggr_depth); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, " "isfiltered=%d\n", __func__, pfx, ni->ni_macaddr, ":", bf, tid->axq_depth, tid->hwq_depth, tid->bar_wait, tid->isfiltered); DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET, "%s: %s: %6D: tid %d: " "sched=%d, paused=%d, " "incomp=%d, baw_head=%d, " "baw_tail=%d txa_start=%d, ni_txseqs=%d\n", __func__, pfx, ni->ni_macaddr, ":", tid->tid, tid->sched, tid->paused, tid->incomp, tid->baw_head, tid->baw_tail, tap == NULL ? -1 : tap->txa_start, ni->ni_txseqs[tid->tid]); /* XXX Dump the frame, see what it is? */ if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ni->ni_ic, mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 0, -1); } /* * Free any packets currently pending in the software TX queue. * * This will be called when a node is being deleted. * * It can also be called on an active node during an interface * reset or state transition. * * (From Linux/reference): * * TODO: For frame(s) that are in the retry state, we will reuse the * sequence number(s) without setting the retry bit. The * alternative is to give up on these and BAR the receiver's window * forward. */ static void ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, ath_bufhead *bf_cq) { struct ath_buf *bf; struct ieee80211_tx_ampdu *tap; struct ieee80211_node *ni = &an->an_node; int t; tap = ath_tx_get_tx_tid(an, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* Walk the queue, free frames */ t = 0; for (;;) { bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } if (t == 0) { ath_tx_tid_drain_print(sc, an, "norm", tid, bf); // t = 1; } ATH_TID_REMOVE(tid, bf, bf_list); ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); } /* And now, drain the filtered frame queue */ t = 0; for (;;) { bf = ATH_TID_FILT_FIRST(tid); if (bf == NULL) break; if (t == 0) { ath_tx_tid_drain_print(sc, an, "filt", tid, bf); // t = 1; } ATH_TID_FILT_REMOVE(tid, bf, bf_list); ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf); } /* * Override the clrdmask configuration for the next frame * in case there is some future transmission, just to get * the ball rolling. * * This won't hurt things if the TID is about to be freed.
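 *
 * (CLRDMASK asks the MAC to clear its destination-mask filter for
 * this node, so the next frame isn't immediately filtered out too;
 * hence the "get the ball rolling" calls sprinkled through this
 * file.)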
*/ ath_tx_set_clrdmask(sc, tid->an); /* * Now that it's completed, grab the TID lock and update * the sequence number and BAW window. * Because sequence numbers have been assigned to frames * that haven't been sent yet, it's entirely possible * we'll be called with some pending frames that have not * been transmitted. * * The cleaner solution is to do the sequence number allocation * when the packet is first transmitted - and thus the "retries" * check above would be enough to update the BAW/seqno. */ /* But don't do it for non-QoS TIDs */ if (tap) { #if 1 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n", __func__, ni->ni_macaddr, ":", an, tid->tid, tap->txa_start); #endif ni->ni_txseqs[tid->tid] = tap->txa_start; tid->baw_tail = tid->baw_head; } } /* * Reset the TID state. This must only be called once the node has * had its frames flushed from this TID, to ensure that no other * pause / unpause logic can kick in. */ static void ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid) { #if 0 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; tid->paused = tid->sched = tid->addba_tx_pending = 0; tid->incomp = tid->cleanup_inprogress = 0; #endif /* * If we have a bar_wait set, we need to unpause the TID * here. Otherwise once cleanup has finished, the TID won't * have the right paused counter. * * XXX I'm not going through resume here - I don't want the * node to be rescheduled just yet. This however should be * methodized! */ if (tid->bar_wait) { if (tid->paused > 0) { tid->paused--; } } /* * XXX same with a currently filtered TID. * * Since this is being called during a flush, we assume that * the filtered frame list is actually empty. * * XXX TODO: add in a check to ensure that the filtered queue * depth is actually 0! */ if (tid->isfiltered) { if (tid->paused > 0) { tid->paused--; } } /* * Clear BAR, filtered frames, scheduled and ADDBA pending. * The TID may be going through cleanup from the last association * where things in the BAW are still in the hardware queue. */ tid->bar_wait = 0; tid->bar_tx = 0; tid->isfiltered = 0; tid->sched = 0; tid->addba_tx_pending = 0; /* * XXX TODO: it may just be enough to walk the HWQs and mark * frames for that node as non-aggregate; or mark the ath_node * with something that indicates that aggregation is no longer * occurring. Then we can just toss the BAW complaints and * do a complete hard reset of state here - no pause, no * complete counter, etc. */ } /* * Flush all software queued packets for the given node. * * This occurs when a completion handler frees the last buffer * for a node, and the node is thus freed. This causes the node * to be cleaned up, which ends up calling ath_tx_node_flush.
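 *
 * Note the pattern used here and in the other drain/flush paths:
 * buffers are collected onto a local bf_cq list under ATH_TX_LOCK,
 * and only completed via ath_tx_default_comp() once the lock has
 * been dropped, as completion may free the node itself.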
*/ void ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an) { int tid; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p", &an->an_node); ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, " "swq_depth=%d, clrdmask=%d, leak_count=%d\n", __func__, an->an_node.ni_macaddr, ":", an->an_is_powersave, an->an_stack_psq, an->an_tim_set, an->an_swq_depth, an->clrdmask, an->an_leak_count); for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { struct ath_tid *atid = &an->an_tid[tid]; /* Free packets */ ath_tx_tid_drain(sc, an, atid, &bf_cq); /* Remove this tid from the list of active tids */ ath_tx_tid_unsched(sc, atid); /* Reset the per-TID pause, BAR, etc state */ ath_tx_tid_reset(sc, atid); } /* * Clear global leak count */ an->an_leak_count = 0; ATH_TX_UNLOCK(sc); /* Handle completed frames */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Drain all the software TXQs currently with traffic queued. */ void ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq) { struct ath_tid *tid; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_TX_LOCK(sc); /* * Iterate over all active tids for the given txq, * flushing and unsched'ing them */ while (! TAILQ_EMPTY(&txq->axq_tidq)) { tid = TAILQ_FIRST(&txq->axq_tidq); ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); ath_tx_tid_unsched(sc, tid); } ATH_TX_UNLOCK(sc); while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle completion of non-aggregate session frames. * * This (currently) doesn't implement software retransmission of * non-aggregate frames! * * Software retransmission of non-aggregate frames needs to obey * the strict sequence number ordering, and drop any frames that * will fail this. * * For now, filtered frames and frame transmission will cause * all kinds of issues. So we don't support them. * * So anyone queuing frames via ath_tx_normal_xmit() or * ath_tx_hw_queue_norm() must override and set CLRDMASK. */ void ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status *ts = &bf->bf_status.ds_txstat; /* The TID state is protected behind the TXQ lock */ ATH_TX_LOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n", __func__, bf, fail, atid->hwq_depth - 1); atid->hwq_depth--; #if 0 /* * If the frame was filtered, stick it on the filter frame * queue and complain about it. It shouldn't happen! */ if ((ts->ts_status & HAL_TXERR_FILT) || (ts->ts_status != 0 && atid->isfiltered)) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=%d, ts_status=%d: huh?\n", __func__, atid->isfiltered, ts->ts_status); ath_tx_tid_filt_comp_buf(sc, atid, bf); } #endif if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__); if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* If the TID is being cleaned up, track things */ /* XXX refactor! */ if (atid->cleanup_inprogress) { atid->incomp--; if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! 
resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } } /* * If the queue is filtered, potentially mark it as complete * and reschedule it as needed. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); ATH_TX_UNLOCK(sc); /* * punt to rate control if we're not being cleaned up * during a hw queue drain and the frame wanted an ACK. */ if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, ts, bf->bf_state.bfs_pktlen, 1, (ts->ts_status == 0) ? 0 : 1); ath_tx_default_comp(sc, bf, fail); } /* * Handle cleanup of aggregate session packets that aren't * an A-MPDU. * * There's no need to update the BAW here - the session is being * torn down. */ static void ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n", __func__, tid, atid->incomp); ATH_TX_LOCK(sc); atid->incomp--; /* XXX refactor! */ if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); ath_tx_default_comp(sc, bf, 0); } /* * This as it currently stands is a bit dumb. Ideally we'd just * fail the frame the normal way and have it permanently fail * via the normal aggregate completion path. */ static void ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an, int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq) { struct ath_tid *atid = &an->an_tid[tid]; struct ath_buf *bf, *bf_next; ATH_TX_LOCK_ASSERT(sc); /* * Remove this frame from the queue. */ ATH_TID_REMOVE(atid, bf_head, bf_list); /* * Loop over all the frames in the aggregate. */ bf = bf_head; while (bf != NULL) { bf_next = bf->bf_next; /* next aggregate frame, or NULL */ /* * If it's been added to the BAW we need to kick * it out of the BAW before we continue. * * XXX if it's an aggregate, assert that it's in the * BAW - we shouldn't have it be in an aggregate * otherwise! */ if (bf->bf_state.bfs_addedbaw) { ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; } /* * Give it the default completion handler. */ bf->bf_comp = ath_tx_normal_comp; bf->bf_next = NULL; /* * Add it to the list to free. */ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); /* * Now advance to the next frame in the aggregate. */ bf = bf_next; } } /* * Performs transmit side cleanup when TID changes from aggregated to * unaggregated and during reassociation. * * For now, this just tosses everything from the TID software queue * whether or not it has been retried and marks the TID as * pending completion if there's anything for this TID queued to * the hardware. * * The caller is responsible for pausing the TID and unpausing the * TID if no cleanup was required. 
Otherwise the cleanup path will * unpause the TID once the last hardware queued frame is completed. */ static void ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid, ath_bufhead *bf_cq) { struct ath_tid *atid = &an->an_tid[tid]; struct ath_buf *bf, *bf_next; ATH_TX_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: TID %d: called; inprogress=%d\n", __func__, tid, atid->cleanup_inprogress); /* * Move the filtered frames to the TX queue, before * we run off and discard/process things. */ /* XXX this is really quite inefficient */ while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) { ATH_TID_FILT_REMOVE(atid, bf, bf_list); ATH_TID_INSERT_HEAD(atid, bf, bf_list); } /* * Update the frames in the software TX queue: * * + Discard retry frames in the queue * + Fix the completion function to be non-aggregate */ bf = ATH_TID_FIRST(atid); while (bf) { /* * Grab the next frame in the list, we may * be fiddling with the list. */ bf_next = TAILQ_NEXT(bf, bf_list); /* * Free the frame and all subframes. */ ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq); /* * Next frame! */ bf = bf_next; } /* * If there's anything in the hardware queue we wait * for the TID HWQ to empty. */ if (atid->hwq_depth > 0) { /* * XXX how about we kill atid->incomp, and instead * replace it with a macro that checks that atid->hwq_depth * is 0? */ atid->incomp = atid->hwq_depth; atid->cleanup_inprogress = 1; } if (atid->cleanup_inprogress) DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleanup needed: %d packets\n", __func__, tid, atid->incomp); /* Owner now must free completed frames */ } static struct ath_buf * ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid, struct ath_buf *bf) { struct ath_buf *nbf; int error; /* * Clone the buffer. This will handle the dma unmap and * copy the node reference to the new buffer. If this * works out, 'bf' will have no DMA mapping, no mbuf * pointer and no node reference. */ nbf = ath_buf_clone(sc, bf); #if 0 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n", __func__); #endif if (nbf == NULL) { /* Failed to clone */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: failed to clone a busy buffer\n", __func__); return NULL; } /* Setup the dma for the new buffer */ error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); if (error != 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: failed to setup dma for clone\n", __func__); /* * Put this at the head of the list, not tail; * that way it doesn't interfere with the * busy buffer logic (which uses the tail of * the list.) */ ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, nbf); ATH_TXBUF_UNLOCK(sc); return NULL; } /* Update BAW if required, before we free the original buf */ if (bf->bf_state.bfs_dobaw) ath_tx_switch_baw_buf(sc, an, tid, bf, nbf); /* Free original buffer; return new buffer */ ath_freebuf(sc, bf); return nbf; } /* * Handle retrying an unaggregate frame in an aggregate * session. * * If too many retries occur, pause the TID, wait for * any further retransmits (as there's no reason why * non-aggregate frames in an aggregate session are * transmitted in-order; they just have to be in-BAW) * and then queue a BAR. */ static void ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ieee80211_tx_ampdu *tap; ATH_TX_LOCK(sc); tap = ath_tx_get_tx_tid(an, tid); /* * If the buffer is marked as busy, we can't directly * reuse it. 
Instead, try to clone the buffer. * If the clone is successful, recycle the old buffer. * If the clone is unsuccessful, set bfs_retries to max * to force the next bit of code to free the buffer * for us. */ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && (bf->bf_flags & ATH_BUF_BUSY)) { struct ath_buf *nbf; nbf = ath_tx_retry_clone(sc, an, atid, bf); if (nbf) /* bf has been freed at this point */ bf = nbf; else bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; } if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, "%s: exceeded retries; seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); sc->sc_stats.ast_tx_swretrymax++; /* Update BAW anyway */ if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (! bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; /* Suspend the TX queue and get ready to send the BAR */ ath_tx_tid_bar_suspend(sc, atid); /* Send the BAR if there are no other frames waiting */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Free buffer, bf is free after this call */ ath_tx_default_comp(sc, bf, 0); return; } /* * This increments the retry counter as well as * sets the retry flag in the ath_buf and packet * body. */ ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swretries++; /* * Insert this at the head of the queue, so it's * retried before any current/subsequent frames. */ ATH_TID_INSERT_HEAD(atid, bf, bf_list); ath_tx_tid_sched(sc, atid); /* Send the BAR if there are no other frames waiting */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); } /* * Common code for aggregate excessive retry/subframe retry. * If retrying, queues buffers to bf_q. If not, frees the * buffers. * * XXX should unify this with ath_tx_aggr_retry_unaggr() */ static int ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf, ath_bufhead *bf_q) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; ATH_TX_LOCK_ASSERT(sc); /* XXX clr11naggr should be done for all subframes */ ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ /* * If the buffer is marked as busy, we can't directly * reuse it. Instead, try to clone the buffer. * If the clone is successful, recycle the old buffer. * If the clone is unsuccessful, set bfs_retries to max * to force the next bit of code to free the buffer * for us. */ if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && (bf->bf_flags & ATH_BUF_BUSY)) { struct ath_buf *nbf; nbf = ath_tx_retry_clone(sc, an, atid, bf); if (nbf) /* bf has been freed at this point */ bf = nbf; else bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; } if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { sc->sc_stats.ast_tx_swretrymax++; DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES, "%s: max retries: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_BAW, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); bf->bf_state.bfs_dobaw = 0; return 1; } ath_tx_set_retry(sc, bf); sc->sc_stats.ast_tx_swretries++; bf->bf_next = NULL; /* Just to make sure */ /* Clear the aggregate state */ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_ndelim = 0; /* ??? 
needed? */ bf->bf_state.bfs_nframes = 1; TAILQ_INSERT_TAIL(bf_q, bf, bf_list); return 0; } /* * Error packet completion for an aggregate destination */ static void ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first, struct ath_tid *tid) { struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); struct ath_buf *bf_next, *bf; ath_bufhead bf_q; int drops = 0; struct ieee80211_tx_ampdu *tap; ath_bufhead bf_cq; TAILQ_INIT(&bf_q); TAILQ_INIT(&bf_cq); /* * Update rate control - all frames have failed. * * XXX use the length in the first frame in the series; * XXX just so things are consistent for now. */ ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, &bf_first->bf_status.ds_txstat, bf_first->bf_state.bfs_pktlen, bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); ATH_TX_LOCK(sc); tap = ath_tx_get_tx_tid(an, tid->tid); sc->sc_stats.ast_tx_aggr_failall++; /* Retry all subframes */ bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ sc->sc_stats.ast_tx_aggr_fail++; if (ath_tx_retry_subframe(sc, bf, &bf_q)) { drops++; bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } bf = bf_next; } /* Prepend all frames to the beginning of the queue */ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { TAILQ_REMOVE(&bf_q, bf, bf_list); ATH_TID_INSERT_HEAD(tid, bf, bf_list); } /* * Schedule the TID to be re-tried. */ ath_tx_tid_sched(sc, tid); /* * Send a BAR if we dropped any frames. * * Keep the txq lock held for now, as we need to ensure * that ni_txseqs[] is consistent (as it's being updated * in the ifnet TX context or raw TX context.) */ if (drops) { /* Suspend the TX queue and get ready to send the BAR */ ath_tx_tid_bar_suspend(sc, tid); } /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, tid)) ath_tx_tid_bar_tx(sc, tid); ATH_TX_UNLOCK(sc); /* Complete frames which errored out */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle clean-up of packets from an aggregate list. * * There's no need to update the BAW here - the session is being * torn down. */ static void ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first) { struct ath_buf *bf, *bf_next; struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf_first->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; ATH_TX_LOCK(sc); /* update incomp */ atid->incomp--; /* Update the BAW */ bf = bf_first; while (bf) { /* XXX refactor! */ if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf = bf->bf_next; } if (atid->incomp == 0) { DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: cleaned up! resume!\n", __func__, tid); atid->cleanup_inprogress = 0; ath_tx_tid_resume(sc, atid); } /* Send BAR if required */ /* XXX why would we send a BAR when transitioning to non-aggregation? */ /* * XXX TODO: we should likely just tear down the BAR state here, * rather than sending a BAR. */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Handle frame completion as individual frames */ bf = bf_first; while (bf) { bf_next = bf->bf_next; bf->bf_next = NULL; ath_tx_default_comp(sc, bf, 1); bf = bf_next; } } /* * Handle completion of a set of aggregate frames.
* * Note: the completion handler is the last descriptor in the aggregate, * not the last descriptor in the first frame. */ static void ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first, int fail) { //struct ath_desc *ds = bf->bf_lastds; struct ieee80211_node *ni = bf_first->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf_first->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status ts; struct ieee80211_tx_ampdu *tap; ath_bufhead bf_q; ath_bufhead bf_cq; int seq_st, tx_ok; int hasba, isaggr; uint32_t ba[2]; struct ath_buf *bf, *bf_next; int ba_index; int drops = 0; int nframes = 0, nbad = 0, nf; int pktlen; /* XXX there's too much on the stack? */ struct ath_rc_series rc[ATH_RC_NUM]; int txseq; DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n", __func__, atid->hwq_depth); /* * Take a copy; this may be needed -after- bf_first * has been completed and freed. */ ts = bf_first->bf_status.ds_txstat; TAILQ_INIT(&bf_q); TAILQ_INIT(&bf_cq); /* The TID state is kept behind the TXQ lock */ ATH_TX_LOCK(sc); atid->hwq_depth--; if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* * If the TID is filtered, handle completing the filter * transition before potentially kicking it to the cleanup * function. * * XXX this is duplicate work, ew. */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * Punt cleanup to the relevant function, not our problem now */ if (atid->cleanup_inprogress) { if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: isfiltered=1, normal_comp?\n", __func__); ATH_TX_UNLOCK(sc); ath_tx_comp_cleanup_aggr(sc, bf_first); return; } /* * If the frame is filtered, transition to filtered frame * mode and add this to the filtered frame list. * * XXX TODO: figure out how this interoperates with * BAR, pause and cleanup states. */ if ((ts.ts_status & HAL_TXERR_FILT) || (ts.ts_status != 0 && atid->isfiltered)) { if (fail != 0) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: isfiltered=1, fail=%d\n", __func__, fail); ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq); /* Remove from BAW */ TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) { if (bf->bf_state.bfs_addedbaw) drops++; if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; } /* * If any intermediate frames in the BAW were dropped when * handling filtering things, send a BAR. */ if (drops) ath_tx_tid_bar_suspend(sc, atid); /* * Finish up by sending a BAR if required and freeing * the frames outside of the TX lock. */ goto finish_send_bar; } /* * XXX for now, use the first frame in the aggregate for * XXX rate control completion; it's at least consistent. */ pktlen = bf_first->bf_state.bfs_pktlen; /* * Handle errors first! * * Here, handle _any_ error as an "exceeded retries" error. * Later on (when filtered frames are to be specially handled) * it'll have to be expanded. */ #if 0 if (ts.ts_status & HAL_TXERR_XRETRY) { #endif if (ts.ts_status != 0) { ATH_TX_UNLOCK(sc); ath_tx_comp_aggr_error(sc, bf_first, atid); return; } tap = ath_tx_get_tx_tid(an, tid); /* * Extract the starting sequence and block-ack bitmap */ /* XXX endian-ness of seq_st, ba? */ seq_st = ts.ts_seqnum; hasba = !!
(ts.ts_flags & HAL_TX_BA); tx_ok = (ts.ts_status == 0); isaggr = bf_first->bf_state.bfs_aggr; ba[0] = ts.ts_ba_low; ba[1] = ts.ts_ba_high; /* * Copy the TX completion status and the rate control * series from the first descriptor, as it may be freed * before the rate control code can get its grubby fingers * into things. */ memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc)); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, " "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n", __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags, isaggr, seq_st, hasba, ba[0], ba[1]); /* * The reference driver doesn't do this; it simply ignores * this check in its entirety. * * I've seen this occur when using iperf to send traffic * out tid 1 - the aggregate frames are all marked as TID 1, * but the TXSTATUS has TID=0. So, let's just ignore this * check. */ #if 0 /* Occasionally, the MAC sends a tx status for the wrong TID. */ if (tid != ts.ts_tid) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n", __func__, tid, ts.ts_tid); tx_ok = 0; } #endif /* AR5416 BA bug; this requires an interface reset */ if (isaggr && tx_ok && (! hasba)) { device_printf(sc->sc_dev, "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, " "seq_st=%d\n", __func__, hasba, tx_ok, isaggr, seq_st); /* XXX TODO: schedule an interface reset */ #ifdef ATH_DEBUG ath_printtxbuf(sc, bf_first, sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0); #endif } /* * Walk the list of frames, figure out which ones were correctly * sent and which weren't. */ bf = bf_first; nf = bf_first->bf_state.bfs_nframes; /* bf_first is going to be invalid once this list is walked */ bf_first = NULL; /* * Walk the list of completed frames and determine * which need to be completed and which need to be * retransmitted. * * For completed frames, the completion functions need * to be called at the end of this function as the last * node reference may free the node. * * Finally, since the TXQ lock can't be held during the * completion callback (to avoid lock recursion), * the completion calls have to be done outside of the * lock. */ while (bf) { nframes++; ba_index = ATH_BA_INDEX(seq_st, SEQNO(bf->bf_state.bfs_seqno)); bf_next = bf->bf_next; bf->bf_next = NULL; /* Remove it from the aggr list */ DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: checking bf=%p seqno=%d; ack=%d\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno), ATH_BA_ISSET(ba, ba_index)); if (tx_ok && ATH_BA_ISSET(ba, ba_index)) { sc->sc_stats.ast_tx_aggr_ok++; ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } else { sc->sc_stats.ast_tx_aggr_fail++; if (ath_tx_retry_subframe(sc, bf, &bf_q)) { drops++; bf->bf_next = NULL; TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list); } nbad++; } bf = bf_next; } /* * Now that the BAW updates have been done, unlock * * txseq is grabbed before the lock is released so we * have a consistent view of what -was- in the BAW. * Anything after this point will not yet have been * TXed. */ txseq = tap->txa_start; ATH_TX_UNLOCK(sc); if (nframes != nf) DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: num frames seen=%d; bf nframes=%d\n", __func__, nframes, nf); /* * Now we know how many frames were bad, call the rate * control code. 
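 *
 * (nframes is how many subframes were walked, nbad how many missed
 * the block-ack.  A purely illustrative worked example: with
 * seq_st=100, a subframe with seqno 103 maps to bit index 3 via
 * ATH_BA_INDEX(), and ATH_BA_ISSET(ba, 3) is whether the peer
 * acked it.)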
*/ if (fail == 0) ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, nbad); /* * Send a BAR if we dropped any frames */ if (drops) { /* Suspend the TX queue and get ready to send the BAR */ ATH_TX_LOCK(sc); ath_tx_tid_bar_suspend(sc, atid); ATH_TX_UNLOCK(sc); } DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: txa_start now %d\n", __func__, tap->txa_start); ATH_TX_LOCK(sc); /* Prepend all frames to the beginning of the queue */ while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) { TAILQ_REMOVE(&bf_q, bf, bf_list); ATH_TID_INSERT_HEAD(atid, bf, bf_list); } /* * Reschedule to grab some further frames. */ ath_tx_tid_sched(sc, atid); /* * If the queue is filtered, re-schedule as required. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); finish_send_bar: /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* Do deferred completion */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 0); } } /* * Handle completion of unaggregated frames in an ADDBA * session. * * Fail is set to 1 if the entry is being freed via a call to * ath_tx_draintxq(). */ static void ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ieee80211_node *ni = bf->bf_node; struct ath_node *an = ATH_NODE(ni); int tid = bf->bf_state.bfs_tid; struct ath_tid *atid = &an->an_tid[tid]; struct ath_tx_status ts; int drops = 0; /* * Take a copy of this; filtering/cloning the frame may free the * bf pointer. */ ts = bf->bf_status.ds_txstat; /* * Update rate control status here, before we possibly * punt to retry or cleanup. * * Do it outside of the TXQ lock. */ if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, &bf->bf_status.ds_txstat, bf->bf_state.bfs_pktlen, 1, (ts.ts_status == 0) ? 0 : 1); /* * This is called early so atid->hwq_depth can be tracked. * This unfortunately means that it's released and regrabbed * during retry and cleanup. That's rather inefficient. */ ATH_TX_LOCK(sc); if (tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n", __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth, SEQNO(bf->bf_state.bfs_seqno)); atid->hwq_depth--; if (atid->hwq_depth < 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n", __func__, atid->hwq_depth); /* * If the TID is filtered, handle completing the filter * transition before potentially kicking it to the cleanup * function. */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * If a cleanup is in progress, punt to comp_cleanup * rather than handling it here. It's thus its * responsibility to clean up, call the completion * function in net80211, etc. */ if (atid->cleanup_inprogress) { if (atid->isfiltered) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=1, normal_comp?\n", __func__); ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n", __func__); ath_tx_comp_cleanup_unaggr(sc, bf); return; } /* * XXX TODO: how does cleanup, BAR and filtered frame handling * overlap?
* * If the frame is filtered OR if it's any failure but * the TID is filtered, the frame must be added to the * filtered frame list. * * However - a busy buffer can't be added to the filtered * list as it will end up being recycled without having * been made available for the hardware. */ if ((ts.ts_status & HAL_TXERR_FILT) || (ts.ts_status != 0 && atid->isfiltered)) { int freeframe; if (fail != 0) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: isfiltered=1, fail=%d\n", __func__, fail); freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf); /* * If freeframe=0 then bf is no longer ours; don't * touch it. */ if (freeframe) { /* Remove from BAW */ if (bf->bf_state.bfs_addedbaw) drops++; if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } bf->bf_state.bfs_dobaw = 0; } /* * If the frame couldn't be filtered, treat it as a drop and * prepare to send a BAR. */ if (freeframe && drops) ath_tx_tid_bar_suspend(sc, atid); /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); /* * If freeframe is set, then the frame couldn't be * cloned and bf is still valid. Just complete/free it. */ if (freeframe) ath_tx_default_comp(sc, bf, fail); return; } /* * Don't bother with the retry check if all frames * are being failed (eg during queue deletion.) */ #if 0 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) { #endif if (fail == 0 && ts.ts_status != 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n", __func__); ath_tx_aggr_retry_unaggr(sc, bf); return; } /* Success? Complete */ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n", __func__, tid, SEQNO(bf->bf_state.bfs_seqno)); if (bf->bf_state.bfs_dobaw) { ath_tx_update_baw(sc, an, atid, bf); bf->bf_state.bfs_dobaw = 0; if (!bf->bf_state.bfs_addedbaw) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: wasn't added: seqno %d\n", __func__, SEQNO(bf->bf_state.bfs_seqno)); } /* * If the queue is filtered, re-schedule as required. * * This is required as there may be a subsequent TX descriptor * for this end-node that has CLRDMASK set, so it's quite possible * that a filtered frame will be followed by a non-filtered * (complete or otherwise) frame. * * XXX should we do this before we complete the frame? */ if (atid->isfiltered) ath_tx_tid_filt_comp_complete(sc, atid); /* * Send BAR if required */ if (ath_tx_tid_bar_tx_ready(sc, atid)) ath_tx_tid_bar_tx(sc, atid); ATH_TX_UNLOCK(sc); ath_tx_default_comp(sc, bf, fail); /* bf is freed at this point */ } void ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { if (bf->bf_state.bfs_aggr) ath_tx_aggr_comp_aggr(sc, bf, fail); else ath_tx_aggr_comp_unaggr(sc, bf, fail); } /* * Schedule some packets from the given node/TID to the hardware. * * This is the aggregate version. */ void ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid) { struct ath_buf *bf; struct ath_txq *txq = sc->sc_ac2q[tid->ac]; struct ieee80211_tx_ampdu *tap; ATH_AGGR_STATUS status; ath_bufhead bf_q; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* * XXX TODO: If we're called for a queue that we're leaking frames to, * ensure we only leak one. 
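 *
 * ("Leaking" refers to the odd frame released to a node in
 * power-save, eg in response to a PS-POLL; see an_leak_count and
 * ath_tx_leak_count_update() for the mechanics.)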
*/ tap = ath_tx_get_tx_tid(an, tid->tid); if (tid->tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: called for TID=NONQOS_TID?\n", __func__); for (;;) { status = ATH_AGGR_DONE; /* * If the upper layer has paused the TID, don't * queue any further packets. * * This can also occur from the completion task because * of packet loss; but as it's serialised with this code, * it won't "appear" half way through queuing packets. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) break; bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } /* * If the packet doesn't fall within the BAW (eg a NULL * data frame), schedule it directly; continue. */ if (! bf->bf_state.bfs_dobaw) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: non-baw packet\n", __func__); ATH_TID_REMOVE(tid, bf, bf_list); if (bf->bf_state.bfs_nframes > 1) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: aggr=%d, nframes=%d\n", __func__, bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); /* * This shouldn't happen - such frames shouldn't * ever have been queued as an aggregate in the * first place. However, make sure the fields * are correctly setup just to be totally sure. */ bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_nframes = 1; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); sc->sc_aggr_stats.aggr_nonbaw_pkt++; /* Queue the packet; continue */ goto queuepkt; } TAILQ_INIT(&bf_q); /* * Do a rate control lookup on the first frame in the * list. The rate control code needs that to occur * before it can determine whether to TX. * It's inaccurate because the rate control code doesn't * really "do" aggregate lookups, so it only considers * the size of the first frame. */ ath_tx_do_ratelookup(sc, bf); bf->bf_state.bfs_rc[3].rix = 0; bf->bf_state.bfs_rc[3].tries = 0; ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); status = ath_tx_form_aggr(sc, an, tid, &bf_q); DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: ath_tx_form_aggr() status=%d\n", __func__, status); /* * No frames to be picked up - out of BAW */ if (TAILQ_EMPTY(&bf_q)) break; /* * This assumes that the descriptors in the ath_bufhead * are already linked together via bf_next pointers. */ bf = TAILQ_FIRST(&bf_q); if (status == ATH_AGGR_8K_LIMITED) sc->sc_aggr_stats.aggr_rts_aggr_limited++; /* * If it's the only frame, send it as non-aggregate and * assume that ath_tx_form_aggr() has checked * whether it's in the BAW and added it appropriately.
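 *
 * (A one-frame "aggregate" is demoted below: bfs_aggr is cleared,
 * a plain descriptor chain is built via ath_tx_setds(), and the
 * 11n aggregate bits are cleared on the descriptor.)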
*/ if (bf->bf_state.bfs_nframes == 1) { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: single-frame aggregate\n", __func__); /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); bf->bf_state.bfs_aggr = 0; bf->bf_state.bfs_ndelim = 0; ath_tx_setds(sc, bf); ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); if (status == ATH_AGGR_BAW_CLOSED) sc->sc_aggr_stats.aggr_baw_closed_single_pkt++; else sc->sc_aggr_stats.aggr_single_pkt++; } else { DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: multi-frame aggregate: %d frames, " "length %d\n", __func__, bf->bf_state.bfs_nframes, bf->bf_state.bfs_al); bf->bf_state.bfs_aggr = 1; sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++; sc->sc_aggr_stats.aggr_aggr_pkt++; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* * Calculate the duration/protection as required. */ ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); /* * Update the rate and rtscts information based on the * rate decision made by the rate control code; * the first frame in the aggregate needs it. */ ath_tx_set_rtscts(sc, bf); /* * Setup the relevant descriptor fields * for aggregation. The first descriptor * already points to the rest in the chain. */ ath_tx_setds_11n(sc, bf); } queuepkt: /* Set completion handler, multi-frame aggregate or not */ bf->bf_comp = ath_tx_aggr_comp; if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__); /* * Update leak count and frame config if we're leaking frames. * * XXX TODO: it should update all frames in an aggregate * correctly! */ ath_tx_leak_count_update(sc, tid, bf); /* Punt to txq */ ath_tx_handoff(sc, txq, bf); /* Track outstanding buffer count to hardware */ /* aggregates are "one" buffer */ tid->hwq_depth++; /* * Break out if ath_tx_form_aggr() indicated * there can't be any further progress (eg BAW is full.) * Checking for an empty txq is done above. * * XXX locking on txq here? */ /* XXX TXQ locking */ if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr || (status == ATH_AGGR_BAW_CLOSED || status == ATH_AGGR_LEAK_CLOSED)) break; } } /* * Schedule some packets from the given node/TID to the hardware. * * XXX TODO: this routine doesn't enforce the maximum TXQ depth. * It just dumps frames into the TXQ. We should limit how deep * the transmit queue can grow for frames dispatched to the given * TXQ. * * To avoid locking issues, either we need to own the TXQ lock * at this point, or we need to pass in the maximum frame count * from the caller. */ void ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an, struct ath_tid *tid) { struct ath_buf *bf; struct ath_txq *txq = sc->sc_ac2q[tid->ac]; DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n", __func__, an, tid->tid); ATH_TX_LOCK_ASSERT(sc); /* Check - is AMPDU pending or running? If so, print out something */ if (ath_tx_ampdu_pending(sc, an, tid->tid)) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n", __func__, tid->tid); if (ath_tx_ampdu_running(sc, an, tid->tid)) DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n", __func__, tid->tid); for (;;) { /* * If the upper layers have paused the TID, don't * queue any further packets. * * XXX if we are leaking frames, make sure we decrement * that counter _and_ we continue here. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) break; bf = ATH_TID_FIRST(tid); if (bf == NULL) { break; } ATH_TID_REMOVE(tid, bf, bf_list); /* Sanity check!
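 * (the TID stamped in the buffer should match the TID queue it was
 * dequeued from)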
*/ if (tid->tid != bf->bf_state.bfs_tid) { DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !=" " tid %d\n", __func__, bf->bf_state.bfs_tid, tid->tid); } /* Normal completion handler */ bf->bf_comp = ath_tx_normal_comp; /* * Override this for now, until the non-aggregate * completion handler correctly handles software retransmits. */ bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; /* Update CLRDMASK just before this frame is queued */ ath_tx_update_clrdmask(sc, tid, bf); /* Program descriptors + rate control */ ath_tx_do_ratelookup(sc, bf); ath_tx_calc_duration(sc, bf); ath_tx_calc_protection(sc, bf); ath_tx_set_rtscts(sc, bf); ath_tx_rate_fill_rcflags(sc, bf); ath_tx_setds(sc, bf); /* * Update the current leak count if * we're leaking frames; and set the * MORE flag as appropriate. */ ath_tx_leak_count_update(sc, tid, bf); /* Track outstanding buffer count to hardware */ /* aggregates are "one" buffer */ tid->hwq_depth++; /* Punt to hardware or software txq */ ath_tx_handoff(sc, txq, bf); } } /* * Schedule some packets to the given hardware queue. * * This function walks the list of TIDs (ie, ath_node TIDs * with queued traffic) and attempts to schedule traffic * from them. * * TID scheduling is implemented as a FIFO, with TIDs being * added to the end of the queue after some frames have been * scheduled. */ void ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq) { struct ath_tid *tid, *next, *last; ATH_TX_LOCK_ASSERT(sc); /* - * Don't schedule if the hardware queue is busy. - * This (hopefully) gives some more time to aggregate - * some packets in the aggregation queue. + * For non-EDMA chips, aggr frames that have been built are + * in axq_aggr_depth, whether they've been scheduled or not. + * There's no FIFO, so txq->axq_depth is what's been scheduled + * to the hardware. * - * XXX It doesn't stop a parallel sender from sneaking - * in transmitting a frame! + * For EDMA chips, we do it in two stages. The existing code + * builds a list of frames to go to the hardware and the EDMA + * code turns it into a single entry to push into the FIFO. + * That way we don't take up one packet per FIFO slot. + * We do push one aggregate per FIFO slot though, just to keep + * things simple. + * + * The FIFO depth is what's in the hardware; the txq->axq_depth + * is what's been scheduled to the FIFO. + * + * fifo.axq_depth is the number of frames (or aggregates) pushed + * into the EDMA FIFO. For multi-frame lists, this is the number + * of frames pushed in. + * axq_fifo_depth is the number of FIFO slots currently busy. */ - /* XXX TXQ locking */ - if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { + + /* For EDMA and non-EDMA, check built/scheduled against aggr limit */ + if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) { sc->sc_aggr_stats.aggr_sched_nopkt++; return; } - if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { + + /* + * For non-EDMA chips, axq_depth is the "what's scheduled to + * the hardware list". For EDMA it's "What's built for the hardware" + * and fifo.axq_depth is how many frames have been dispatched + * already to the hardware. + */ + if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) { sc->sc_aggr_stats.aggr_sched_nopkt++; return; } last = TAILQ_LAST(&txq->axq_tidq, axq_t_s); TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) { /* * Suspend paused queues here; they'll be resumed * once the addba completes or times out. 
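 *
 * (Scheduling is a simple round-robin here: each TID is removed
 * from the list, given a chance to queue frames, and re-appended
 * at the tail if it still has traffic; "last" bounds one full
 * pass over the list.)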
*/ DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n", __func__, tid->tid, tid->paused); ath_tx_tid_unsched(sc, tid); /* * This node may be in power-save and we're leaking * a frame; be careful. */ if (! ath_tx_tid_can_tx_or_sched(sc, tid)) { goto loop_done; } if (ath_tx_ampdu_running(sc, tid->an, tid->tid)) ath_tx_tid_hw_queue_aggr(sc, tid->an, tid); else ath_tx_tid_hw_queue_norm(sc, tid->an, tid); /* Not empty? Re-schedule */ if (tid->axq_depth != 0) ath_tx_tid_sched(sc, tid); /* * Give the software queue time to aggregate more * packets. If we aren't running aggregation then * we should still limit the hardware queue depth. */ /* XXX TXQ locking */ if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) { break; } if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) { break; } loop_done: /* * If this was the last entry on the original list, stop. * Otherwise nodes that have been rescheduled onto the end * of the TID FIFO list will just keep being rescheduled. * * XXX What should we do about nodes that were paused * but are pending a leaking frame in response to a ps-poll? * They'll be put at the front of the list; so they'll * prematurely trigger this condition! Ew. */ if (tid == last) break; } } /* * TX addba handling */ /* * Return net80211 TID struct pointer, or NULL for none */ struct ieee80211_tx_ampdu * ath_tx_get_tx_tid(struct ath_node *an, int tid) { struct ieee80211_node *ni = &an->an_node; struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return NULL; tap = &ni->ni_tx_ampdu[tid]; return tap; } /* * Is AMPDU-TX running? */ static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid) { struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return 0; tap = ath_tx_get_tx_tid(an, tid); if (tap == NULL) return 0; /* Not valid; default to not running */ return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING); } /* * Is AMPDU-TX negotiation pending? */ static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid) { struct ieee80211_tx_ampdu *tap; if (tid == IEEE80211_NONQOS_TID) return 0; tap = ath_tx_get_tx_tid(an, tid); if (tap == NULL) return 0; /* Not valid; default to not pending */ return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND); } /* * Is AMPDU-TX pending for the given TID? */ /* * Method to handle sending an ADDBA request. * * We tap this so the relevant flags can be set to pause the TID * whilst waiting for the response. * * XXX there's no timeout handler we can override? */ int ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; /* * XXX danger Will Robinson! * * Although the taskqueue may be running and scheduling some more * packets, these should all be _before_ the addba sequence number. * However, net80211 will keep self-assigning sequence numbers * until addba has been negotiated. * * In the past, these packets would be "paused" (which still works * fine, as they're being scheduled to the driver in the same * serialised method which is calling the addba request routine) * and when the aggregation session begins, they'll be dequeued * as aggregate packets and added to the BAW. However, now there's * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these * packets. 
Thus they never get included in the BAW tracking and * this can cause the initial burst of packets after the addba * negotiation to "hang", as they quickly fall outside the BAW. * * The "eventual" solution should be to tag these packets with * dobaw. Although net80211 has given us a sequence number, * it'll be "after" the left edge of the BAW and thus it'll * fall within it. */ ATH_TX_LOCK(sc); /* * This is a bit annoying. Until net80211 HT code inherits some * (any) locking, we may have this called in parallel BUT only * one response/timeout will be called. Grr. */ if (atid->addba_tx_pending == 0) { ath_tx_tid_pause(sc, atid); atid->addba_tx_pending = 1; } ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n", __func__, ni->ni_macaddr, ":", dialogtoken, baparamset, batimeout); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: txa_start=%d, ni_txseqs=%d\n", __func__, tap->txa_start, ni->ni_txseqs[tid]); return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } /* * Handle an ADDBA response. * * We unpause the queue so TX'ing can resume. * * Any packets TX'ed from this point should be "aggregate" (whether * aggregate or not) so the BAW is updated. * * Note! net80211 keeps self-assigning sequence numbers until * ampdu is negotiated. This means the initially-negotiated BAW left * edge won't match the ni->ni_txseq. * * So, being very dirty, the BAW left edge is "slid" here to match * ni->ni_txseq. * * What likely SHOULD happen is that all packets subsequent to the * addba request should be tagged as aggregate and queued as non-aggregate * frames; thus updating the BAW. For now though, I'll just slide the * window. */ int ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status, int code, int batimeout) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; int r; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__, ni->ni_macaddr, ":", status, code, batimeout); DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: txa_start=%d, ni_txseqs=%d\n", __func__, tap->txa_start, ni->ni_txseqs[tid]); /* * Call this first, so the interface flags get updated * before the TID is unpaused. Otherwise a race condition * exists where the unpaused TID still doesn't yet have * IEEE80211_AGGR_RUNNING set. */ r = sc->sc_addba_response(ni, tap, status, code, batimeout); ATH_TX_LOCK(sc); atid->addba_tx_pending = 0; /* * XXX dirty! * Slide the BAW left edge to wherever net80211 left it for us. * Read above for more information. */ tap->txa_start = ni->ni_txseqs[tid]; ath_tx_tid_resume(sc, atid); ATH_TX_UNLOCK(sc); return r; } /* * Stop ADDBA on a queue. * * This can be called whilst BAR TX is currently active on the queue, * so make sure this is unblocked before continuing. */ void ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; ath_bufhead bf_cq; struct ath_buf *bf; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n", __func__, ni->ni_macaddr, ":"); /* * Pause TID traffic early, so there aren't any races. * Unblock the pending BAR held traffic, if it's currently paused.
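 *
 * Rough order of operations below, as a reading aid: pause the
 * TID; clear any pending BAR suspend; tell net80211 via
 * sc_addba_stop(); run the TID cleanup; then resume the TID now,
 * or let the cleanup completion path resume it later.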
*/ ATH_TX_LOCK(sc); ath_tx_tid_pause(sc, atid); if (atid->bar_wait) { /* * bar_unsuspend() expects bar_tx == 1, as it should be * called from the TX completion path. This quietens * the warning. It's cleared for us anyway. */ atid->bar_tx = 1; ath_tx_tid_bar_unsuspend(sc, atid); } ATH_TX_UNLOCK(sc); /* There's no need to hold the TXQ lock here */ sc->sc_addba_stop(ni, tap); /* * ath_tx_tid_cleanup will resume the TID if possible, otherwise * it'll set the cleanup flag, and it'll be unpaused once * things have been cleaned up. */ TAILQ_INIT(&bf_cq); ATH_TX_LOCK(sc); /* * In case there's a followup call to this, only call it * if we don't have a cleanup in progress. * * Since we've paused the queue above, we need to make * sure we unpause if there's already a cleanup in * progress - it means something else is also doing * this stuff, so we don't need to also keep it paused. */ if (atid->cleanup_inprogress) { ath_tx_tid_resume(sc, atid); } else { ath_tx_tid_cleanup(sc, an, tid, &bf_cq); /* * Unpause the TID if no cleanup is required. */ if (! atid->cleanup_inprogress) ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); /* Handle completing frames and fail them */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 1); } } /* * Handle a node reassociation. * * We may have a bunch of frames queued to the hardware; those need * to be marked as cleanup. */ void ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *tid; int i; ath_bufhead bf_cq; struct ath_buf *bf; TAILQ_INIT(&bf_cq); ATH_TX_UNLOCK_ASSERT(sc); ATH_TX_LOCK(sc); for (i = 0; i < IEEE80211_TID_SIZE; i++) { tid = &an->an_tid[i]; if (tid->hwq_depth == 0) continue; DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: TID %d: cleaning up TID\n", __func__, an->an_node.ni_macaddr, ":", i); /* * In case there's a followup call to this, only call it * if we don't have a cleanup in progress. */ if (! tid->cleanup_inprogress) { ath_tx_tid_pause(sc, tid); ath_tx_tid_cleanup(sc, an, i, &bf_cq); /* * Unpause the TID if no cleanup is required. */ if (! tid->cleanup_inprogress) ath_tx_tid_resume(sc, tid); } } ATH_TX_UNLOCK(sc); /* Handle completing frames and fail them */ while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) { TAILQ_REMOVE(&bf_cq, bf, bf_list); ath_tx_default_comp(sc, bf, 1); } } /* * Note: net80211 bar_timeout() doesn't call this function on BAR failure; * it simply tears down the aggregation session. Ew. * * It however will call ieee80211_ampdu_stop() which will call * ic->ic_addba_stop(). * * XXX This uses a hard-coded max BAR count value; the whole * XXX BAR TX success or failure should be better handled! */ void ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; int attempts = tap->txa_attempts; int old_txa_start; DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n", __func__, ni->ni_macaddr, ":", tap->txa_tid, atid->tid, status, attempts, tap->txa_start, tap->txa_seqpending); /* Note: This may update the BAW details */ /* * XXX What if this does slide the BAW along? We need to somehow * XXX either fix things when it does happen, or prevent the * XXX seqpending value to be anything other than exactly what * XXX the hell we want! 
* * XXX So for now, how about I do this inside the TX lock * XXX and just correct it afterwards? The below condition should * XXX never happen and if it does I need to fix all kinds of things. */ ATH_TX_LOCK(sc); old_txa_start = tap->txa_start; sc->sc_bar_response(ni, tap, status); if (tap->txa_start != old_txa_start) { device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n", __func__, tid, tap->txa_start, old_txa_start); } tap->txa_start = old_txa_start; ATH_TX_UNLOCK(sc); /* Unpause the TID */ /* * XXX if this is attempt=50, the TID will be downgraded * XXX to a non-aggregate session. So we must unpause the * XXX TID here or it'll never be done. * * Also, don't call it if bar_tx/bar_wait are 0; something * has beaten us to the punch? (XXX figure out what?) */ if (status == 0 || attempts == 50) { ATH_TX_LOCK(sc); if (atid->bar_tx == 0 || atid->bar_wait == 0) DPRINTF(sc, ATH_DEBUG_SW_TX_BAR, "%s: huh? bar_tx=%d, bar_wait=%d\n", __func__, atid->bar_tx, atid->bar_wait); else ath_tx_tid_bar_unsuspend(sc, atid); ATH_TX_UNLOCK(sc); } } /* * This is called whenever the pending ADDBA request times out. * Unpause and reschedule the TID. */ void ath_addba_response_timeout(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ath_softc *sc = ni->ni_ic->ic_softc; int tid = tap->txa_tid; struct ath_node *an = ATH_NODE(ni); struct ath_tid *atid = &an->an_tid[tid]; DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: TID=%d, called; resuming\n", __func__, ni->ni_macaddr, ":", tid); ATH_TX_LOCK(sc); atid->addba_tx_pending = 0; ATH_TX_UNLOCK(sc); /* Note: This updates the aggregate state to (again) pending */ sc->sc_addba_response_timeout(ni, tap); /* Unpause the TID; which reschedules it */ ATH_TX_LOCK(sc); ath_tx_tid_resume(sc, atid); ATH_TX_UNLOCK(sc); } /* * Check if a node is asleep or not. */ int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an) { ATH_TX_LOCK_ASSERT(sc); return (an->an_is_powersave); } /* * Mark a node as currently "in powersaving." * This suspends all traffic on the node. * * This must be called with the node/tx locks free. * * XXX TODO: the locking silliness below is due to how the node * locking currently works. Right now, the node lock is grabbed * to do rate control lookups and these are done with the TX * queue lock held. This means the node lock can't be grabbed * first here or a LOR will occur. * * Eventually (hopefully!) the TX path code will only grab * the TXQ lock when transmitting and the ath_node lock when * doing node/TID operations. There are other complications - * the sched/unsched operations involve walking the per-txq * 'active tid' list and this requires both locks to be held. */ void ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *atid; struct ath_txq *txq; int tid; ATH_TX_UNLOCK_ASSERT(sc); /* Suspend all traffic on the node */ ATH_TX_LOCK(sc); if (an->an_is_powersave) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %6D: node was already asleep!\n", __func__, an->an_node.ni_macaddr, ":"); ATH_TX_UNLOCK(sc); return; } for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { atid = &an->an_tid[tid]; txq = sc->sc_ac2q[atid->ac]; ath_tx_tid_pause(sc, atid); } /* Mark node as in powersaving */ an->an_is_powersave = 1; ATH_TX_UNLOCK(sc); } /* * Mark a node as currently "awake." * This resumes all traffic to the node. */ void ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an) { struct ath_tid *atid; struct ath_txq *txq; int tid; ATH_TX_UNLOCK_ASSERT(sc); ATH_TX_LOCK(sc); /* !?
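 */

/*
 * Illustrative sketch only - not part of this change. It models the
 * pause/resume pattern used by ath_tx_node_sleep()/ath_tx_node_wakeup()
 * above: each pause bumps a per-TID counter and each resume drops it,
 * so nested pausers (power-save, ADDBA, BAR) compose, and a TID may
 * only be scheduled once the counter is back to zero. The reduced
 * types and constants here are hypothetical, not the driver's.
 */
#define N_TIDS  17      /* assumption: one slot per TID plus non-QoS */

struct tid_model {
        int paused;     /* >0 means some pauser is holding the TID */
};

static void
node_sleep_model(struct tid_model t[N_TIDS])
{
        int i;

        for (i = 0; i < N_TIDS; i++)
                t[i].paused++;          /* suspend all traffic */
}

static void
node_wakeup_model(struct tid_model t[N_TIDS])
{
        int i;

        for (i = 0; i < N_TIDS; i++)
                t[i].paused--;          /* scheduling resumes at zero */
}

/*
 * If the node was already awake, there's nothing to do.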
*/ if (an->an_is_powersave == 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: an=%p: node was already awake\n", __func__, an); return; } /* Mark node as awake */ an->an_is_powersave = 0; /* * Clear any pending leaked frame requests */ an->an_leak_count = 0; for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) { atid = &an->an_tid[tid]; txq = sc->sc_ac2q[atid->ac]; ath_tx_tid_resume(sc, atid); } ATH_TX_UNLOCK(sc); } static int ath_legacy_dma_txsetup(struct ath_softc *sc) { /* nothing new needed */ return (0); } static int ath_legacy_dma_txteardown(struct ath_softc *sc) { /* nothing new needed */ return (0); } void ath_xmit_setup_legacy(struct ath_softc *sc) { /* * For now, just set the descriptor length to sizeof(ath_desc); * worry about extracting the real length out of the HAL later. */ sc->sc_tx_desclen = sizeof(struct ath_desc); sc->sc_tx_statuslen = sizeof(struct ath_desc); sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */ sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup; sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown; sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func; sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart; sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff; sc->sc_tx.xmit_drain = ath_legacy_tx_drain; } Index: projects/clang400-import/sys/dev/ath/if_ath_tx_edma.c =================================================================== --- projects/clang400-import/sys/dev/ath/if_ath_tx_edma.c (revision 312719) +++ projects/clang400-import/sys/dev/ath/if_ath_tx_edma.c (revision 312720) @@ -1,1061 +1,1080 @@ /*- * Copyright (c) 2012 Adrian Chadd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). * * It's also required for any AH_DEBUG checks in here, eg the * module dependencies. 
*/ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ATH_TX99_DIAG #include #endif #include #ifdef ATH_DEBUG_ALQ #include #endif /* * some general macros */ #define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1) #define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1) /* * XXX doesn't belong here, and should be tunable */ #define ATH_TXSTATUS_RING_SIZE 512 MALLOC_DECLARE(M_ATHDEV); static void ath_edma_tx_processq(struct ath_softc *sc, int dosched); #ifdef ATH_DEBUG_ALQ static void ath_tx_alq_edma_push(struct ath_softc *sc, int txq, int nframes, int fifo_depth, int frame_cnt) { struct if_ath_alq_tx_fifo_push aq; aq.txq = htobe32(txq); aq.nframes = htobe32(nframes); aq.fifo_depth = htobe32(fifo_depth); aq.frame_cnt = htobe32(frame_cnt); if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TX_FIFO_PUSH, sizeof(aq), (const char *) &aq); } #endif /* ATH_DEBUG_ALQ */ /* * XXX TODO: push an aggregate as a single FIFO slot, even though * it may not meet the TXOP for say, DBA-gated traffic in TDMA mode. * * The TX completion code handles a TX FIFO slot having multiple frames, * aggregate or otherwise, but it may just make things easier to deal * with. * * XXX TODO: track the number of aggregate subframes and put that in the * push alq message. */ static void ath_tx_edma_push_staging_list(struct ath_softc *sc, struct ath_txq *txq, int limit) { struct ath_buf *bf, *bf_last; struct ath_buf *bfi, *bfp; int i, sqdepth; TAILQ_HEAD(axq_q_f_s, ath_buf) sq; ATH_TXQ_LOCK_ASSERT(txq); + DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_TX_PROC, + "%s: called; TXQ=%d, fifo.depth=%d, axq_q empty=%d\n", + __func__, + txq->axq_qnum, + txq->axq_fifo_depth, + !! (TAILQ_EMPTY(&txq->axq_q))); + /* * Don't bother doing any work if it's full. */ if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) return; if (TAILQ_EMPTY(&txq->axq_q)) return; TAILQ_INIT(&sq); /* * First pass - dequeue up to 'limit' entries from the TXQ and * append them to the staging list. */ sqdepth = 0; for (i = 0; i < limit; i++) { /* Grab the head entry */ bf = ATH_TXQ_FIRST(txq); if (bf == NULL) break; ATH_TXQ_REMOVE(txq, bf, bf_list); /* Queue it into our staging list */ TAILQ_INSERT_TAIL(&sq, bf, bf_list); /* Ensure the flags are cleared */ bf->bf_flags &= ~(ATH_BUF_FIFOPTR | ATH_BUF_FIFOEND); sqdepth++; } /* * Ok, so now we have a staging list of up to 'limit' * frames from the txq. Now let's wrap that up * into its own list and pass that to the hardware * as one FIFO entry. */ bf = TAILQ_FIRST(&sq); bf_last = TAILQ_LAST(&sq, axq_q_s); /* * Ok, so here's the gymnastics required to make this * all sensible. */ /* * Tag the first/last buffer appropriately. */ bf->bf_flags |= ATH_BUF_FIFOPTR; bf_last->bf_flags |= ATH_BUF_FIFOEND; /* * Walk the descriptor list and link them appropriately.
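 */

/*
 * Illustrative sketch only - not part of this change. It shows the
 * trailing-pointer idiom used just below to chain each staged buffer's
 * last descriptor to its successor's first descriptor, leaving the
 * final buffer's link untouched (that marks the end of the FIFO slot).
 * The reduced type is hypothetical.
 */
struct buf_model {
        struct buf_model *next;         /* list linkage */
        unsigned long daddr;            /* first descriptor's DMA address */
        unsigned long link;             /* last descriptor's link pointer */
};

static void
chain_model(struct buf_model *head)
{
        struct buf_model *prev = NULL, *n;

        for (n = head; n != NULL; n = n->next) {
                if (prev != NULL)
                        prev->link = n->daddr;  /* chain prev -> n */
                prev = n;
        }
}

/*
 * Chain each staged buffer's last descriptor to its successor.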
*/ bfp = NULL; TAILQ_FOREACH(bfi, &sq, bf_list) { if (bfp != NULL) { ath_hal_settxdesclink(sc->sc_ah, bfp->bf_lastds, bfi->bf_daddr); } bfp = bfi; } i = 0; TAILQ_FOREACH(bfi, &sq, bf_list) { #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) ath_printtxbuf(sc, bfi, txq->axq_qnum, i, 0); #endif /* ATH_DEBUG */ #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) ath_tx_alq_post(sc, bfi); #endif /* ATH_DEBUG_ALQ */ i++; } /* * We now need to push this set of frames onto the tail * of the FIFO queue. We don't adjust the aggregate * count, only the queue depth counter(s). * We also need to blank the link pointer now. */ TAILQ_CONCAT(&txq->fifo.axq_q, &sq, bf_list); /* Bump total queue tracking in FIFO queue */ txq->fifo.axq_depth += sqdepth; /* Bump FIFO queue */ txq->axq_fifo_depth++; DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_TX_PROC, "%s: queued %d packets; depth=%d, fifo depth=%d\n", __func__, sqdepth, txq->fifo.axq_depth, txq->axq_fifo_depth); /* Push the first entry into the hardware */ ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); /* Push start on the DMA if it's not already started */ ath_hal_txstart(sc->sc_ah, txq->axq_qnum); #ifdef ATH_DEBUG_ALQ ath_tx_alq_edma_push(sc, txq->axq_qnum, sqdepth, txq->axq_fifo_depth, txq->fifo.axq_depth); #endif /* ATH_DEBUG_ALQ */ } #define TX_BATCH_SIZE 32 /* * Push some frames into the TX FIFO if we have space. */ static void ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq) { ATH_TXQ_LOCK_ASSERT(txq); DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called; fifo.depth=%d, fifo depth=%d, depth=%d, aggr_depth=%d\n", __func__, txq->axq_qnum, txq->fifo.axq_depth, txq->axq_fifo_depth, txq->axq_depth, txq->axq_aggr_depth); /* * For now, push up to 32 frames per TX FIFO slot. * If more are in the hardware queue then they'll * get populated when we try to send another frame * or complete a frame - so at most there'll be * 32 non-AMPDU frames per node/TID anyway. * * Note that the hardware staging queue will limit * how many frames in total we will have pushed into * here. * * Later on, we'll want to push fewer frames into * the TX FIFO since we don't want to necessarily * fill tens or hundreds of milliseconds of potential * frames. * * However, we need more frames right now because of * how the MAC implements the frame scheduling policy. * It only ungates a single FIFO entry at a time, * and will run that until CHNTIME expires or the * end of that FIFO entry descriptor list is reached. * So for TDMA we suffer a big performance penalty - * single TX FIFO entries mean the MAC only sends out * one frame per DBA event, which turned out to be, * on average, 6ms per TX frame. * * So, for aggregates it's okay - it'll push two at a * time and this will just do them more efficiently. * For non-aggregates it'll do 4 at a time, up to the * non-aggr limit (non_aggr, which is 32.) They should * be time based rather than a hard count, but I also * do need sleep. */ /* * Do some basic, basic batching to the hardware * queue. * * If we have TX_BATCH_SIZE entries in the staging * queue, then let's try to send them all in one hit. * * Ensure we don't push more than TX_BATCH_SIZE worth * in, otherwise we end up draining 8 slots worth of * 32 frames into the hardware queue and then we don't * attempt to push more frames in until we empty the * FIFO.
*/ if (txq->axq_depth >= TX_BATCH_SIZE / 2 && txq->fifo.axq_depth <= TX_BATCH_SIZE) { ath_tx_edma_push_staging_list(sc, txq, TX_BATCH_SIZE); } /* * Aggregate check: if we have less than two FIFO slots * busy and we have some aggregate frames, queue it. * * Now, ideally we'd just check to see if the scheduler * has given us aggregate frames and push them into the FIFO * as individual slots, as honestly we should just be pushing * a single aggregate in as one FIFO slot. * * Let's do that next once I know this works. */ else if (txq->axq_aggr_depth > 0 && txq->axq_fifo_depth < 2) ath_tx_edma_push_staging_list(sc, txq, TX_BATCH_SIZE); /* * * If we have less, and the TXFIFO isn't empty, let's * wait until we've finished sending the FIFO. * * If we have less, and the TXFIFO is empty, then * send them. */ else if (txq->axq_fifo_depth == 0) { ath_tx_edma_push_staging_list(sc, txq, TX_BATCH_SIZE); } } /* * Re-initialise the DMA FIFO with the current contents of * said TXQ. * * This should only be called as part of the chip reset path, as it * assumes the FIFO is currently empty. */ static void ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq) { struct ath_buf *bf; int i = 0; int fifostart = 1; int old_fifo_depth; DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n", __func__, txq->axq_qnum); ATH_TXQ_LOCK_ASSERT(txq); /* * Let's log if the tracked FIFO depth doesn't match * what we actually push in. */ old_fifo_depth = txq->axq_fifo_depth; txq->axq_fifo_depth = 0; /* * Walk the FIFO staging list, looking for "head" entries. * Since we may have a partially completed list of frames, * we push the first frame we see into the FIFO and re-mark * it as the head entry. We then skip entries until we see * FIFO end, at which point we get ready to push another * entry into the FIFO. */ TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) { /* * If we're looking for FIFOEND and we haven't found * it, skip. * * If we're looking for FIFOEND and we've found it, * reset for another descriptor. */ #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0); #endif/* ATH_DEBUG */ #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) ath_tx_alq_post(sc, bf); #endif /* ATH_DEBUG_ALQ */ if (fifostart == 0) { if (bf->bf_flags & ATH_BUF_FIFOEND) fifostart = 1; continue; } /* Make sure we're not overflowing the FIFO! */ if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) { device_printf(sc->sc_dev, "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n", __func__, txq->axq_qnum, txq->axq_fifo_depth); } #if 0 DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n", __func__, txq->axq_qnum, txq->axq_fifo_depth, bf, !! (bf->bf_flags & ATH_BUF_FIFOPTR), !! (bf->bf_flags & ATH_BUF_FIFOEND)); #endif /* * Set this to be the first buffer in the FIFO * list - even if it's also the last buffer in * a FIFO list! */ bf->bf_flags |= ATH_BUF_FIFOPTR; /* Push it into the FIFO and bump the FIFO count */ ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); txq->axq_fifo_depth++; /* * If this isn't the last entry either, let's * clear fifostart so we continue looking for * said last entry. */ if (! (bf->bf_flags & ATH_BUF_FIFOEND)) fifostart = 0; i++; } /* Only bother starting the queue if there's something in it */ if (i > 0) ath_hal_txstart(sc->sc_ah, txq->axq_qnum); DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n", __func__, txq->axq_qnum, old_fifo_depth, txq->axq_fifo_depth); /* And now, let's check! 
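 */

/*
 * Illustrative sketch only - not part of this change. It models the
 * scan above: the FIFO staging list is a flat list of buffers in which
 * each hardware FIFO slot is a run of buffers terminated by one buffer
 * flagged as the slot's end. Counting slots means taking each run's
 * head and skipping forward to the run's end. The flag name is a
 * hypothetical stand-in for ATH_BUF_FIFOEND.
 */
#define F_END   0x1     /* buffer ends its FIFO slot */

static int
count_fifo_slots(const int *flags, int n)
{
        int i, slots = 0, skipping = 0;

        for (i = 0; i < n; i++) {
                if (skipping) {
                        /* Inside a slot: skip until its end buffer */
                        if (flags[i] & F_END)
                                skipping = 0;
                        continue;
                }
                slots++;                /* head of a new slot */
                if (!(flags[i] & F_END))
                        skipping = 1;   /* multi-buffer slot */
        }
        return (slots);
}

/*
 * Warn if the rebuilt FIFO depth disagrees with what was tracked
 * before the restart.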
*/ if (txq->axq_fifo_depth != old_fifo_depth) { device_printf(sc->sc_dev, "%s: Q%d: FIFO depth should be %d, is %d\n", __func__, txq->axq_qnum, old_fifo_depth, txq->axq_fifo_depth); } } /* * Hand off this frame to a hardware queue. * * Things are a bit hairy in the EDMA world. The TX FIFO is only * 8 entries deep, so we need to keep track of exactly what we've * pushed into the FIFO and what's just sitting in the TX queue, * waiting to go out. * * So this is split into two halves - frames get appended to the * TXQ; then a scheduler is called to push some frames into the * actual TX FIFO. */ static void ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TXQ_LOCK(txq); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); /* * XXX TODO: write a hard-coded check to ensure that * the queue id in the TX descriptor matches txq->axq_qnum. */ /* Update aggr stats */ if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth++; /* Push and update frame stats */ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); /* * Finally, call the FIFO schedule routine to schedule some * frames to the FIFO. */ ath_edma_tx_fifo_fill(sc, txq); ATH_TXQ_UNLOCK(txq); } /* * Hand off this frame to a multicast software queue. * * The EDMA TX CABQ will get a list of chained frames, chained * together using the next pointer. The single head of that * particular queue is pushed to the hardware CABQ. */ static void ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { ATH_TX_LOCK_ASSERT(sc); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); ATH_TXQ_LOCK(txq); /* * XXX this is mostly duplicated in ath_tx_handoff_mcast(). */ if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) { struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s); struct ieee80211_frame *wh; /* mark previous frame */ wh = mtod(bf_last->bf_m, struct ieee80211_frame *); wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; /* re-sync buffer to memory */ bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, BUS_DMASYNC_PREWRITE); /* link descriptor */ ath_hal_settxdesclink(sc->sc_ah, bf_last->bf_lastds, bf->bf_daddr); } #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) ath_tx_alq_post(sc, bf); #endif /* ATH_DEBUG_ALQ */ ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ATH_TXQ_UNLOCK(txq); } /* * Hand off this frame to the hardware. * * For the multicast queue, this will treat it as a software queue * and append it to the list, after updating the MORE_DATA flag * in the previous frame. The cabq processing code will ensure * that the queue contents get transferred over. * * For the hardware queues, this will queue a frame to the queue * like before, then populate the FIFO from that. Since the * EDMA hardware has 8 FIFO slots per TXQ, this ensures that * frames such as management frames don't get prematurely dropped. * * This does imply that a similar flush-hwq-to-fifoq method will * need to be called from the processq function, before the * per-node software scheduler is called.
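 */

/*
 * Illustrative sketch only - not part of this change. A reduced model
 * of the two-stage queue described above: frames first land in an
 * unbounded software TXQ, and a fill step then moves batches into at
 * most eight hardware FIFO slots. The constants and types are
 * hypothetical (HAL_TXFIFO_DEPTH is assumed to be 8 here), and the
 * greedy fill below simplifies the real batching policy.
 */
#define SLOTS_MAX       8
#define BATCH_MAX       32

struct txq_model {
        int staged;     /* frames waiting in the software TXQ */
        int slots;      /* FIFO slots currently owned by hardware */
};

static void
handoff_model(struct txq_model *q)
{
        q->staged++;    /* stage the new frame */

        /* Fill: one slot consumes up to BATCH_MAX staged frames */
        while (q->slots < SLOTS_MAX && q->staged > 0) {
                q->staged -= (q->staged < BATCH_MAX) ? q->staged : BATCH_MAX;
                q->slots++;
        }
}

/*
 * Dispatch the frame to the multicast (CABQ) or hardware handoff path.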
*/ static void ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { DPRINTF(sc, ATH_DEBUG_XMIT_DESC, "%s: called; bf=%p, txq=%p, qnum=%d\n", __func__, bf, txq, txq->axq_qnum); if (txq->axq_qnum == ATH_TXQ_SWQ) ath_edma_xmit_handoff_mcast(sc, txq, bf); else ath_edma_xmit_handoff_hw(sc, txq, bf); } static int ath_edma_setup_txfifo(struct ath_softc *sc, int qnum) { struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum]; te->m_fifo = malloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH, M_ATHDEV, M_NOWAIT | M_ZERO); if (te->m_fifo == NULL) { device_printf(sc->sc_dev, "%s: malloc failed\n", __func__); return (-ENOMEM); } /* * Set initial "empty" state. */ te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0; return (0); } static int ath_edma_free_txfifo(struct ath_softc *sc, int qnum) { struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum]; /* XXX TODO: actually deref the ath_buf entries? */ free(te->m_fifo, M_ATHDEV); return (0); } static int ath_edma_dma_txsetup(struct ath_softc *sc) { int error; int i; error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma, NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE); if (error != 0) return (error); ath_hal_setuptxstatusring(sc->sc_ah, (void *) sc->sc_txsdma.dd_desc, sc->sc_txsdma.dd_desc_paddr, ATH_TXSTATUS_RING_SIZE); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { ath_edma_setup_txfifo(sc, i); } return (0); } static int ath_edma_dma_txteardown(struct ath_softc *sc) { int i; for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { ath_edma_free_txfifo(sc, i); } ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL); return (0); } /* * Drain all TXQs, potentially after completing the existing completed * frames. */ static void ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) { int i; DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); (void) ath_stoptxdma(sc); /* * If reset type is noloss, the TX FIFO needs to be serviced * and those frames need to be handled. * * Otherwise, just toss everything in each TX queue. */ if (reset_type == ATH_RESET_NOLOSS) { ath_edma_tx_processq(sc, 0); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { ATH_TXQ_LOCK(&sc->sc_txq[i]); /* * Free the holding buffer; DMA is now * stopped. */ ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); /* * Reset the link pointer to NULL; there's * no frames to chain DMA to. */ sc->sc_txq[i].axq_link = NULL; ATH_TXQ_UNLOCK(&sc->sc_txq[i]); } } } else { for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) ath_tx_draintxq(sc, &sc->sc_txq[i]); } } /* XXX dump out the TX completion FIFO contents */ /* XXX dump out the frames */ sc->sc_wd_timer = 0; } /* * TX completion tasklet. */ static void ath_edma_tx_proc(void *arg, int npending) { struct ath_softc *sc = (struct ath_softc *) arg; ATH_PCU_LOCK(sc); sc->sc_txproc_cnt++; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); #if 0 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n", __func__, npending); #endif ath_edma_tx_processq(sc, 1); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ath_tx_kick(sc); } /* * Process the TX status queue. 
*/ static void ath_edma_tx_processq(struct ath_softc *sc, int dosched) { struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; struct ath_tx_status ts; struct ath_txq *txq; struct ath_buf *bf; struct ieee80211_node *ni; int nacked = 0; int idx; int i; #ifdef ATH_DEBUG /* XXX */ uint32_t txstatus[32]; #endif + DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called\n", __func__); + for (idx = 0; ; idx++) { bzero(&ts, sizeof(ts)); ATH_TXSTATUS_LOCK(sc); #ifdef ATH_DEBUG ath_hal_gettxrawtxdesc(ah, txstatus); #endif status = ath_hal_txprocdesc(ah, NULL, (void *) &ts); ATH_TXSTATUS_UNLOCK(sc); - if (status == HAL_EINPROGRESS) + if (status == HAL_EINPROGRESS) { + DPRINTF(sc, ATH_DEBUG_TX_PROC, + "%s: (%d): EINPROGRESS\n", + __func__, idx); break; + } #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_TX_PROC) if (ts.ts_queue_id != sc->sc_bhalq) ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id, idx, (status == HAL_OK)); #endif /* * If there is an error with this descriptor, continue * processing. * * XXX TBD: log some statistics? */ if (status == HAL_EIO) { device_printf(sc->sc_dev, "%s: invalid TX status?\n", __func__); break; } #if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG) if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS)) { if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, sc->sc_tx_statuslen, (char *) txstatus); } #endif /* ATH_DEBUG_ALQ */ /* * At this point we have a valid status descriptor. * The QID and descriptor ID (which currently isn't set) * is part of the status. * * We then assume that the descriptor in question is the * -head- of the given QID. Eventually we should verify * this by using the descriptor ID. */ /* * The beacon queue is not currently a "real" queue. * Frames aren't pushed onto it and the lock isn't setup. * So skip it for now; the beacon handling code will * free and alloc more beacon buffers as appropriate. */ if (ts.ts_queue_id == sc->sc_bhalq) continue; txq = &sc->sc_txq[ts.ts_queue_id]; ATH_TXQ_LOCK(txq); bf = ATH_TXQ_FIRST(&txq->fifo); /* * Work around the situation where I'm seeing notifications * for Q1 when no frames are available. That needs to be * debugged but not by crashing _here_. */ if (bf == NULL) { device_printf(sc->sc_dev, "%s: Q%d: empty?\n", __func__, ts.ts_queue_id); ATH_TXQ_UNLOCK(txq); continue; } DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n", __func__, ts.ts_queue_id, bf, !! (bf->bf_flags & ATH_BUF_FIFOPTR), !! (bf->bf_flags & ATH_BUF_FIFOEND)); /* XXX TODO: actually output debugging info about this */ #if 0 /* XXX assert the buffer/descriptor matches the status descid */ if (ts.ts_desc_id != bf->bf_descid) { device_printf(sc->sc_dev, "%s: mismatched descid (qid=%d, tsdescid=%d, " "bfdescid=%d\n", __func__, ts.ts_queue_id, ts.ts_desc_id, bf->bf_descid); } #endif /* This removes the buffer and decrements the queue depth */ ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth--; /* * If this was the end of a FIFO set, decrement FIFO depth */ if (bf->bf_flags & ATH_BUF_FIFOEND) txq->axq_fifo_depth--; /* * If this isn't the final buffer in a FIFO set, mark * the buffer as busy so it goes onto the holding queue. */ if (! 
(bf->bf_flags & ATH_BUF_FIFOEND)) bf->bf_flags |= ATH_BUF_BUSY; DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n", __func__, txq->axq_qnum, txq->axq_fifo_depth, txq->fifo.axq_depth); /* XXX assert FIFO depth >= 0 */ ATH_TXQ_UNLOCK(txq); /* * Outside of the TX lock - if the buffer is the end * buffer in this FIFO, we don't need a holding * buffer any longer. */ if (bf->bf_flags & ATH_BUF_FIFOEND) { ATH_TXQ_LOCK(txq); ath_txq_freeholdingbuf(sc, txq); ATH_TXQ_UNLOCK(txq); } /* * First we need to make sure ts_rate is valid. * * Pre-EDMA chips pass the whole TX descriptor to * the proctxdesc function which will then fill out * ts_rate based on the ts_finaltsi (final TX index) * in the TX descriptor. However the TX completion * FIFO doesn't have this information. So here we * do a separate HAL call to populate that information. * * The same problem exists with ts_longretry. * The FreeBSD HAL corrects ts_longretry in the HAL layer; * the AR9380 HAL currently doesn't. So until the HAL * is imported and this can be added, we correct for it * here. */ /* XXX TODO */ /* XXX faked for now. Ew. */ if (ts.ts_finaltsi < 4) { ts.ts_rate = bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode; switch (ts.ts_finaltsi) { case 3: ts.ts_longretry += bf->bf_state.bfs_rc[2].tries; /* FALLTHROUGH */ case 2: ts.ts_longretry += bf->bf_state.bfs_rc[1].tries; /* FALLTHROUGH */ case 1: ts.ts_longretry += bf->bf_state.bfs_rc[0].tries; } } else { device_printf(sc->sc_dev, "%s: finaltsi=%d\n", __func__, ts.ts_finaltsi); ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode; } /* * XXX This is terrible. * * Right now, some code uses the TX status that is * passed in here, but the completion handlers in the * software TX path also use bf_status.ds_txstat. * Ew. That should all go away. * * XXX It's also possible the rate control completion * routine is called twice. */ memcpy(&bf->bf_status, &ts, sizeof(ts)); ni = bf->bf_node; /* Update RSSI */ /* XXX duplicate from ath_tx_processq */ if (ni != NULL && ts.ts_status == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { nacked++; sc->sc_stats.ast_tx_rssi = ts.ts_rssi; ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, ts.ts_rssi); } /* Handle frame completion and rate control update */ ath_tx_process_buf_completion(sc, txq, &ts, bf); /* NB: bf is invalid at this point */ } sc->sc_wd_timer = 0; /* * XXX It's inefficient to do this if the FIFO queue is full, * but there's no easy way right now to only populate * the txq task for _one_ TXQ. This should be fixed.
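 */

/*
 * Illustrative sketch only - not part of this change. The fall-through
 * switch above accumulates the tries of every rate series before the
 * final one; it is equivalent to this loop (types are hypothetical,
 * modelled on a 4-entry rate-control series):
 */
struct rc_model {
        int tries;
};

static int
longretry_model(const struct rc_model rc[4], int finaltsi)
{
        int i, retries = 0;

        for (i = 0; i < finaltsi; i++)
                retries += rc[i].tries; /* every earlier series failed */
        return (retries);
}

/*
 * If rescheduling was requested, run the software scheduler and then
 * refill each active TXQ's FIFO.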
*/ if (dosched) { /* Attempt to schedule more hardware frames to the TX FIFO */ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { + ATH_TX_LOCK(sc); + ath_txq_sched(sc, &sc->sc_txq[i]); + ATH_TX_UNLOCK(sc); + ATH_TXQ_LOCK(&sc->sc_txq[i]); ath_edma_tx_fifo_fill(sc, &sc->sc_txq[i]); ATH_TXQ_UNLOCK(&sc->sc_txq[i]); } } /* Kick software scheduler */ ath_tx_swq_kick(sc); } + + DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: end\n", __func__); } static void ath_edma_attach_comp_func(struct ath_softc *sc) { TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc); } void ath_xmit_setup_edma(struct ath_softc *sc) { /* Fetch EDMA field and buffer sizes */ (void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen); (void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen); (void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps); if (bootverbose) { device_printf(sc->sc_dev, "TX descriptor length: %d\n", sc->sc_tx_desclen); device_printf(sc->sc_dev, "TX status length: %d\n", sc->sc_tx_statuslen); device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n", sc->sc_tx_nmaps); } sc->sc_tx.xmit_setup = ath_edma_dma_txsetup; sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown; sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func; sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart; sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff; sc->sc_tx.xmit_drain = ath_edma_tx_drain; } Index: projects/clang400-import/sys/dev/e1000/em_txrx.c =================================================================== --- projects/clang400-import/sys/dev/e1000/em_txrx.c (revision 312719) +++ projects/clang400-import/sys/dev/e1000/em_txrx.c (revision 312720) @@ -1,746 +1,749 @@ /*- * Copyright (c) 2016 Matt Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #include "if_em.h" #ifdef RSS #include #include #endif #ifdef VERBOSE_DEBUG #define DPRINTF device_printf #else #define DPRINTF(...) 
#endif /********************************************************************* * Local Function prototypes *********************************************************************/ static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower); static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower); static int em_isc_txd_encap(void *arg, if_pkt_info_t pi); static void em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx); static int em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear); static void em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused); static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx); static int em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget); static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); static void lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused); static int lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget); static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri); static void em_receive_checksum(uint32_t status, if_rxd_info_t ri); extern int em_intr(void *arg); struct if_txrx em_txrx = { em_isc_txd_encap, em_isc_txd_flush, em_isc_txd_credits_update, em_isc_rxd_available, em_isc_rxd_pkt_get, em_isc_rxd_refill, em_isc_rxd_flush, em_intr }; struct if_txrx lem_txrx = { em_isc_txd_encap, em_isc_txd_flush, em_isc_txd_credits_update, lem_isc_rxd_available, lem_isc_rxd_pkt_get, lem_isc_rxd_refill, em_isc_rxd_flush, em_intr }; extern if_shared_ctx_t em_sctx; /********************************************************************** * * Setup work for hardware segmentation offload (TSO) on * adapters using advanced tx descriptors * **********************************************************************/ static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower) { if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; struct e1000_context_desc *TXD; struct em_txbuffer *tx_buffer; int cur, hdr_len; hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen; *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */ E1000_TXD_DTYP_D | /* Data descr type */ E1000_TXD_CMD_TSE); /* Do TSE on this packet */ /* IP and/or TCP header checksum calculation and insertion. */ *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; cur = pi->ipi_pidx; TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; tx_buffer = &txr->tx_buffers[cur]; /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of the place to put the checksum. */ TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen; TXD->lower_setup.ip_fields.ipcse = htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1); TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum); /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of the place to put the checksum.
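 */

/*
 * Worked example (not part of this change): for a plain Ethernet +
 * IPv4 + TCP frame with no options, ipi_ehdrlen = 14 and
 * ipi_ip_hlen = 20, so the context descriptor fields come out as:
 *
 *	ipcss = 14			(IP checksum starts after Ethernet)
 *	ipcse = 14 + 20 - 1 = 33	(last byte of the IP header)
 *	ipcso = 14 + offsetof(struct ip, ip_sum) = 14 + 10 = 24
 *	tucss = 14 + 20 = 34		(TCP checksum starts after IP)
 *	tucse = 0			(checksum runs to end of frame)
 *	tucso = 34 + offsetof(struct tcphdr, th_sum) = 34 + 16 = 50
 */

/*
 * Fill in the payload (TCP) checksum fields.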
*/ TXD->upper_setup.tcp_fields.tucss = pi->ipi_ehdrlen + pi->ipi_ip_hlen; TXD->upper_setup.tcp_fields.tucse = 0; TXD->upper_setup.tcp_fields.tucso = pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum); /* * Payload size per packet w/o any headers. * Length of all headers up to payload. */ TXD->tcp_seg_setup.fields.mss = htole16(pi->ipi_tso_segsz); TXD->tcp_seg_setup.fields.hdr_len = hdr_len; TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | /* Extended descr */ E1000_TXD_CMD_TSE | /* TSE context */ E1000_TXD_CMD_IP | /* Do IP csum */ E1000_TXD_CMD_TCP | /* Do TCP checksum */ (pi->ipi_len - hdr_len)); /* Total len */ tx_buffer->eop = -1; txr->tx_tso = TRUE; if (++cur == scctx->isc_ntxd[0]) { cur = 0; } DPRINTF(iflib_get_dev(adapter->ctx), "%s: pidx: %d cur: %d\n", __FUNCTION__, pi->ipi_pidx, cur); return (cur); } #define TSO_WORKAROUND 4 #define DONT_FORCE_CTX 1 /********************************************************************* * The offload context is protocol specific (TCP/UDP) and thus * only needs to be set when the protocol changes. The occasion * of a context change can be a performance detriment, and * might be better just disabled. The reason arises in the way * in which the controller supports pipelined requests from the * Tx data DMA. Up to four requests can be pipelined, and they may * belong to the same packet or to multiple packets. However all * requests for one packet are issued before a request is issued * for a subsequent packet and if a request for the next packet * requires a context change, that request will be stalled * until the previous request completes. This means setting up * a new context effectively disables pipelined Tx data DMA which * in turn greatly slows down performance when sending small-sized * frames. **********************************************************************/ static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower) { struct e1000_context_desc *TXD = NULL; if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; struct em_txbuffer *tx_buffer; int csum_flags = pi->ipi_csum_flags; int cur, hdr_len; u32 cmd; cur = pi->ipi_pidx; hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen; cmd = adapter->txd_cmd; /* * The 82574L can only remember the *last* context used * regardless of the queue that it was used for. We cannot reuse * contexts on this hardware platform and must generate a new * context every time. 82574L hardware spec, section 7.2.6, * second note. */ if (DONT_FORCE_CTX && adapter->tx_num_queues == 1 && txr->csum_lhlen == pi->ipi_ehdrlen && txr->csum_iphlen == pi->ipi_ip_hlen && txr->csum_flags == csum_flags) { /* * Same csum offload context as the previous packets; * just return. */ *txd_upper = txr->csum_txd_upper; *txd_lower = txr->csum_txd_lower; return (cur); } TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; if (csum_flags & CSUM_IP) { *txd_upper |= E1000_TXD_POPTS_IXSM << 8; /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of the place to put the checksum.
*/ TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen; TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len); TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum); cmd |= E1000_TXD_CMD_IP; } if (csum_flags & (CSUM_TCP|CSUM_UDP)) { uint8_t tucso; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; if (csum_flags & CSUM_TCP) { tucso = hdr_len + offsetof(struct tcphdr, th_sum); cmd |= E1000_TXD_CMD_TCP; } else tucso = hdr_len + offsetof(struct udphdr, uh_sum); TXD->upper_setup.tcp_fields.tucss = hdr_len; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; } txr->csum_lhlen = pi->ipi_ehdrlen; txr->csum_iphlen = pi->ipi_ip_hlen; txr->csum_flags = csum_flags; txr->csum_txd_upper = *txd_upper; txr->csum_txd_lower = *txd_lower; TXD->tcp_seg_setup.data = htole32(0); TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); tx_buffer = &txr->tx_buffers[cur]; tx_buffer->eop = -1; if (++cur == scctx->isc_ntxd[0]) { cur = 0; } DPRINTF(iflib_get_dev(adapter->ctx), "checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x hdr_len=%d cmd=%x\n", csum_flags, *txd_upper, *txd_lower, hdr_len, cmd); return (cur); } static int em_isc_txd_encap(void *arg, if_pkt_info_t pi) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; bus_dma_segment_t *segs = pi->ipi_segs; int nsegs = pi->ipi_nsegs; int csum_flags = pi->ipi_csum_flags; int i, j, first, pidx_last; u32 txd_upper = 0, txd_lower = 0; struct em_txbuffer *tx_buffer; struct e1000_tx_desc *ctxd = NULL; bool do_tso, tso_desc; i = first = pi->ipi_pidx; do_tso = (csum_flags & CSUM_TSO); tso_desc = FALSE; /* * TSO Hardware workaround, if this packet is not * TSO, and is only a single descriptor long, and * it follows a TSO burst, then we need to add a * sentinel descriptor to prevent premature writeback. */ if ((!do_tso) && (txr->tx_tso == TRUE)) { if (nsegs == 1) tso_desc = TRUE; txr->tx_tso = FALSE; } /* Do hardware assists */ if (do_tso) { i = em_tso_setup(sc, pi, &txd_upper, &txd_lower); tso_desc = TRUE; } else if (csum_flags & EM_CSUM_OFFLOAD) { i = em_transmit_checksum_setup(sc, pi, &txd_upper, &txd_lower); } if (pi->ipi_mflags & M_VLANTAG) { /* Set the vlan id. 
*/ txd_upper |= htole16(pi->ipi_vtag) << 16; /* Tell hardware to add tag */ txd_lower |= htole32(E1000_TXD_CMD_VLE); } DPRINTF(iflib_get_dev(sc->ctx), "encap: set up tx: nsegs=%d first=%d i=%d\n", nsegs, first, i); /* XXX adapter->pcix_82544 -- lem_fill_descriptors */ /* Set up our transmit descriptors */ for (j = 0; j < nsegs; j++) { bus_size_t seg_len; bus_addr_t seg_addr; uint32_t cmd; ctxd = &txr->tx_base[i]; tx_buffer = &txr->tx_buffers[i]; seg_addr = segs[j].ds_addr; seg_len = segs[j].ds_len; cmd = E1000_TXD_CMD_IFCS | sc->txd_cmd; /* ** TSO Workaround: ** If this is the last descriptor, we want to ** split it so we have a small final sentinel */ if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) { seg_len -= TSO_WORKAROUND; ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32(cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); if (++i == scctx->isc_ntxd[0]) i = 0; /* Now make the sentinel */ ctxd = &txr->tx_base[i]; tx_buffer = &txr->tx_buffers[i]; ctxd->buffer_addr = htole64(seg_addr + seg_len); ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND); ctxd->upper.data = htole32(txd_upper); pidx_last = i; if (++i == scctx->isc_ntxd[0]) i = 0; DPRINTF(iflib_get_dev(sc->ctx), "TSO path pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]); } else { ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32(cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); pidx_last = i; if (++i == scctx->isc_ntxd[0]) i = 0; DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]); } tx_buffer->eop = -1; } /* * Last Descriptor of Packet * needs End Of Packet (EOP) * and Report Status (RS) */ ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); tx_buffer = &txr->tx_buffers[first]; tx_buffer->eop = pidx_last; DPRINTF(iflib_get_dev(sc->ctx), "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i); pi->ipi_new_pidx = i; return (0); } static void em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx) { struct adapter *adapter = arg; struct em_tx_queue *que = &adapter->tx_queues[txqid]; struct tx_ring *txr = &que->txr; E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx); } static int em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que = &adapter->tx_queues[txqid]; struct tx_ring *txr = &que->txr; u32 cidx, processed = 0; int last, done; struct em_txbuffer *buf; struct e1000_tx_desc *tx_desc, *eop_desc; cidx = cidx_init; buf = &txr->tx_buffers[cidx]; tx_desc = &txr->tx_base[cidx]; - last = buf->eop; + last = buf->eop; + if (last == -1) + return (processed); eop_desc = &txr->tx_base[last]; - DPRINTF(iflib_get_dev(adapter->ctx), "credits_update: cidx_init=%d clear=%d last=%d\n", + DPRINTF(iflib_get_dev(adapter->ctx), + "credits_update: cidx_init=%d clear=%d last=%d\n", cidx_init, clear, last); /* * What this does is get the index of the * first descriptor AFTER the EOP of the * first packet, that way we can do the * simple comparison on the inner while loop. */ if (++last == scctx->isc_ntxd[0]) - last = 0; + last = 0; done = last; while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) { /* We clean the range of the packet */ while (cidx != done) { if (clear) { tx_desc->upper.data = 0; tx_desc->lower.data = 0; tx_desc->buffer_addr = 0; buf->eop = -1; } tx_desc++; buf++; processed++; - + /* wrap the ring ? 
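 */

/*
 * Illustrative sketch only - not part of this change. It models the
 * termination trick described above: 'done' is the index of the first
 * descriptor AFTER the packet's EOP descriptor, so the inner cleanup
 * loop can use a simple inequality while still wrapping around the
 * ring. The function is a hypothetical stand-in.
 */
static int
clean_packet_model(int cidx, int eop, int ntxd)
{
        int done = (eop + 1) % ntxd;    /* first descriptor after EOP */
        int cleaned = 0;

        while (cidx != done) {
                cleaned++;              /* "clean" descriptor at cidx */
                cidx = (cidx + 1) % ntxd;
        }
        return (cleaned);
}

/*
 * Wrap the ring index.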
*/ if (++cidx == scctx->isc_ntxd[0]) { cidx = 0; } buf = &txr->tx_buffers[cidx]; tx_desc = &txr->tx_base[cidx]; } /* See if we can continue to the next packet */ last = buf->eop; if (last == -1) break; eop_desc = &txr->tx_base[last]; /* Get new done point */ if (++last == scctx->isc_ntxd[0]) last = 0; done = last; } DPRINTF(iflib_get_dev(adapter->ctx), "Processed %d credits update\n", processed); return(processed); } static void lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; struct e1000_rx_desc *rxd; int i; uint32_t next_pidx; for (i = 0, next_pidx = pidx; i < count; i++) { rxd = (struct e1000_rx_desc *)&rxr->rx_base[next_pidx]; rxd->buffer_addr = htole64(paddrs[i]); /* status bits must be cleared */ rxd->status = 0; if (++next_pidx == scctx->isc_nrxd[0]) next_pidx = 0; } } static void em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; union e1000_rx_desc_extended *rxd; int i; uint32_t next_pidx; for (i = 0, next_pidx = pidx; i < count; i++) { rxd = &rxr->rx_base[next_pidx]; rxd->read.buffer_addr = htole64(paddrs[i]); /* DD bits must be cleared */ rxd->wb.upper.status_error = 0; if (++next_pidx == scctx->isc_nrxd[0]) next_pidx = 0; } } static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx) { struct adapter *sc = arg; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx); } static int lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; struct e1000_rx_desc *rxd; u32 staterr = 0; int cnt, i; for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) { rxd = (struct e1000_rx_desc *)&rxr->rx_base[i]; staterr = rxd->status; if ((staterr & E1000_RXD_STAT_DD) == 0) break; if (++i == scctx->isc_nrxd[0]) i = 0; if (staterr & E1000_RXD_STAT_EOP) cnt++; } return (cnt); } static int em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct em_rx_queue *que = &sc->rx_queues[rxqid]; struct rx_ring *rxr = &que->rxr; union e1000_rx_desc_extended *rxd; u32 staterr = 0; int cnt, i; for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) { rxd = &rxr->rx_base[i]; staterr = le32toh(rxd->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; if (++i == scctx->isc_nrxd[0]) { i = 0; } if (staterr & E1000_RXD_STAT_EOP) cnt++; } return (cnt); } static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx]; struct rx_ring *rxr = &que->rxr; struct e1000_rx_desc *rxd; u16 len; u32 status, errors; bool eop; int i, cidx; status = errors = i = 0; cidx = ri->iri_cidx; do { rxd = (struct e1000_rx_desc *)&rxr->rx_base[cidx]; status = rxd->status; errors = rxd->errors; /* 
Error Checking then decrement count */ MPASS ((status & E1000_RXD_STAT_DD) != 0); len = le16toh(rxd->length); ri->iri_len += len; eop = (status & E1000_RXD_STAT_EOP) != 0; /* Make sure bad packets are discarded */ if (errors & E1000_RXD_ERR_FRAME_ERR_MASK) { adapter->dropped_pkts++; /* XXX fixup if common */ return (EBADMSG); } ri->iri_frags[i].irf_flid = 0; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = len; /* Zero out the receive descriptors status. */ rxd->status = 0; if (++cidx == scctx->isc_nrxd[0]) cidx = 0; i++; } while (!eop); /* XXX add a faster way to look this up */ if (adapter->hw.mac.type >= e1000_82543 && !(status & E1000_RXD_STAT_IXSM)) lem_receive_checksum(status, errors, ri); if (status & E1000_RXD_STAT_VP) { ri->iri_vtag = le16toh(rxd->special); ri->iri_flags |= M_VLANTAG; } ri->iri_nfrags = i; return (0); } static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct adapter *adapter = arg; if_softc_ctx_t scctx = adapter->shared; struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx]; struct rx_ring *rxr = &que->rxr; union e1000_rx_desc_extended *rxd; u16 len; u32 staterr = 0; bool eop; int i, cidx, vtag; i = vtag = 0; cidx = ri->iri_cidx; do { rxd = &rxr->rx_base[cidx]; staterr = le32toh(rxd->wb.upper.status_error); /* Error Checking then decrement count */ MPASS ((staterr & E1000_RXD_STAT_DD) != 0); len = le16toh(rxd->wb.upper.length); ri->iri_len += len; eop = (staterr & E1000_RXD_STAT_EOP) != 0; /* Make sure bad packets are discarded */ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { adapter->dropped_pkts++; return EBADMSG; } ri->iri_frags[i].irf_flid = 0; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = len; /* Zero out the receive descriptors status. */ rxd->wb.upper.status_error &= htole32(~0xFF); if (++cidx == scctx->isc_nrxd[0]) cidx = 0; i++; } while (!eop); /* XXX add a faster way to look this up */ if (adapter->hw.mac.type >= e1000_82543) em_receive_checksum(staterr, ri); if (staterr & E1000_RXD_STAT_VP) { vtag = le16toh(rxd->wb.upper.vlan); } ri->iri_vtag = vtag; ri->iri_nfrags = i; if (vtag) ri->iri_flags |= M_VLANTAG; return (0); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. * *********************************************************************/ static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri) { /* Did it pass? */ if (status & E1000_RXD_STAT_IPCS && !(errors & E1000_RXD_ERR_IPE)) ri->iri_csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID); if (status & E1000_RXD_STAT_TCPCS) { /* Did it pass? 
*/ if (!(errors & E1000_RXD_ERR_TCPE)) { ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); ri->iri_csum_data = htons(0xffff); } } } static void em_receive_checksum(uint32_t status, if_rxd_info_t ri) { ri->iri_csum_flags = 0; /* Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) return; /* If the IP checksum exists and there is no IP Checksum error */ if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == E1000_RXD_STAT_IPCS) { ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); } /* TCP or UDP checksum */ if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == E1000_RXD_STAT_TCPCS) { ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); ri->iri_csum_data = htons(0xffff); } if (status & E1000_RXD_STAT_UDPCS) { ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); ri->iri_csum_data = htons(0xffff); } } Index: projects/clang400-import/sys/dev/e1000/if_em.c =================================================================== --- projects/clang400-import/sys/dev/e1000/if_em.c (revision 312719) +++ projects/clang400-import/sys/dev/e1000/if_em.c (revision 312720) @@ -1,4296 +1,4308 @@ /*- * Copyright (c) 2016 Matt Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/
/* $FreeBSD$ */

#include "if_em.h"
#include <sys/sbuf.h>
#include <machine/_inttypes.h>

#define em_mac_min e1000_82547
#define igb_mac_min e1000_82575

/*********************************************************************
 *  Driver version:
 *********************************************************************/
char em_driver_version[] = "7.6.1-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into e1000_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static pci_vendor_info_t em_vendor_info_array[] =
{
        /* Intel(R) PRO/1000 Network Connection - Legacy em */
        PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) PRO/1000 Network Connection"),
        PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) PRO/1000 Network Connection"),
PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) PRO/1000 Network Connection"), /* Intel(R) PRO/1000 Network Connection - em */ PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571EB_FIBER, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_IFE_GT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IFE, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IFE_GT, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_IFE_G, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH9_BM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82574L, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_82574LA, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_LF, "Intel(R) PRO/1000 
Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH10_R_BM_V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_LF, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_ICH10_D_BM_V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_M_HV_LC, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_D_HV_DC, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH2_LV_LM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_I218_LM3, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V4, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5, "Intel(R) PRO/1000 Network Connection"), PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V5, "Intel(R) PRO/1000 Network Connection"), /* required last entry */ PVID_END }; static pci_vendor_info_t igb_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection - em */ PVID(0x8086, E1000_DEV_ID_82575EB_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_NS, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_NS_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_SERDES_QUAD, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82576_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82580_COPPER, "Intel(R) PRO/1000 PCI-Express Network 
Driver"), PVID(0x8086, E1000_DEV_ID_82580_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82580_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82580_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82580_COPPER_DUAL, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_82580_QUAD_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_DH89XXCC_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_DH89XXCC_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_DH89XXCC_SFP, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I350_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I350_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I350_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I350_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I350_VF, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_COPPER_IT, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_COPPER_OEM1, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_FIBER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_SERDES, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I210_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I211_COPPER, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, "Intel(R) PRO/1000 PCI-Express Network Driver"), PVID(0x8086, E1000_DEV_ID_I354_SGMII, "Intel(R) PRO/1000 PCI-Express Network Driver"), /* required last entry */ PVID_END }; /********************************************************************* * Function prototypes *********************************************************************/ static void *em_register(device_t dev); static void *igb_register(device_t dev); static int em_if_attach_pre(if_ctx_t ctx); static int em_if_attach_post(if_ctx_t ctx); static int em_if_detach(if_ctx_t ctx); static int em_if_shutdown(if_ctx_t ctx); static int em_if_suspend(if_ctx_t ctx); static int em_if_resume(if_ctx_t ctx); static int em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static void em_if_queues_free(if_ctx_t ctx); static uint64_t em_if_get_counter(if_ctx_t, ift_counter); static void em_if_init(if_ctx_t ctx); static void em_if_stop(if_ctx_t ctx); static void em_if_media_status(if_ctx_t, struct ifmediareq *); static int em_if_media_change(if_ctx_t ctx); static int em_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void em_if_timer(if_ctx_t ctx, uint16_t qid); static void em_if_vlan_register(if_ctx_t ctx, u16 
vtag); static void em_if_vlan_unregister(if_ctx_t ctx, u16 vtag); static void em_identify_hardware(if_ctx_t ctx); static int em_allocate_pci_resources(if_ctx_t ctx); static void em_free_pci_resources(if_ctx_t ctx); static void em_reset(if_ctx_t ctx); static int em_setup_interface(if_ctx_t ctx); static int em_setup_msix(if_ctx_t ctx); static void em_initialize_transmit_unit(if_ctx_t ctx); static void em_initialize_receive_unit(if_ctx_t ctx); static void em_if_enable_intr(if_ctx_t ctx); static void em_if_disable_intr(if_ctx_t ctx); static int em_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static void em_if_multi_set(if_ctx_t ctx); static void em_if_update_admin_status(if_ctx_t ctx); static void em_update_stats_counters(struct adapter *); static void em_add_hw_stats(struct adapter *adapter); static int em_if_set_promisc(if_ctx_t ctx, int flags); static void em_setup_vlan_hw_support(struct adapter *); static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void em_print_nvm_info(struct adapter *); static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static void em_print_debug_info(struct adapter *); static int em_is_valid_ether_addr(u8 *); static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS); static void em_add_int_delay_sysctl(struct adapter *, const char *, const char *, struct em_int_delay_info *, int, int); /* Management and WOL Support */ static void em_init_manageability(struct adapter *); static void em_release_manageability(struct adapter *); static void em_get_hw_control(struct adapter *); static void em_release_hw_control(struct adapter *); static void em_get_wakeup(if_ctx_t ctx); static void em_enable_wakeup(if_ctx_t ctx); static int em_enable_phy_wakeup(struct adapter *); static void em_disable_aspm(struct adapter *); int em_intr(void *arg); static void em_disable_promisc(if_ctx_t ctx); /* MSIX handlers */ static int em_if_msix_intr_assign(if_ctx_t, int); static int em_msix_link(void *); static void em_handle_link(void *context); static void em_enable_vectors_82574(if_ctx_t); static void em_set_sysctl_value(struct adapter *, const char *, const char *, int *, int); static int em_set_flowcntl(SYSCTL_HANDLER_ARGS); static int em_sysctl_eee(SYSCTL_HANDLER_ARGS); static void em_if_led_func(if_ctx_t ctx, int onoff); static void em_init_tx_ring(struct em_tx_queue *que); static int em_get_regs(SYSCTL_HANDLER_ARGS); static void lem_smartspeed(struct adapter *adapter); static void igb_configure_queues(struct adapter *adapter); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t em_methods[] = { /* Device interface */ DEVMETHOD(device_register, em_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; static device_method_t igb_methods[] = { /* Device interface */ DEVMETHOD(device_register, igb_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; static driver_t em_driver = { "em", em_methods, sizeof(struct adapter), }; 
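Both method tables above are deliberately thin: every bus-facing device_* entry other than device_register forwards to a generic iflib_device_* routine, and the driver-specific work happens in the ifdi_* methods registered in em_if_methods below. A minimal sketch of the same registration pattern for a hypothetical iflib driver "foo" (all foo_* names are illustrative only, not part of this driver):

/* foo_register() would return the driver's if_shared_ctx, as em_register() does below. */
static device_method_t foo_methods[] = {
        DEVMETHOD(device_register, foo_register),
        /* Generic iflib entry points handle the rest of the newbus protocol. */
        DEVMETHOD(device_probe, iflib_device_probe),
        DEVMETHOD(device_attach, iflib_device_attach),
        DEVMETHOD(device_detach, iflib_device_detach),
        DEVMETHOD_END
};

static driver_t foo_driver = {
        "foo", foo_methods, sizeof(struct foo_softc)
};
/* DRIVER_MODULE(foo, pci, foo_driver, foo_devclass, 0, 0); would then bind it to the PCI bus. */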
static devclass_t em_devclass; DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0); MODULE_DEPEND(em, pci, 1, 1, 1); MODULE_DEPEND(em, ether, 1, 1, 1); MODULE_DEPEND(em, iflib, 1, 1, 1); static driver_t igb_driver = { "igb", igb_methods, sizeof(struct adapter), }; static devclass_t igb_devclass; DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0); MODULE_DEPEND(igb, pci, 1, 1, 1); MODULE_DEPEND(igb, ether, 1, 1, 1); MODULE_DEPEND(igb, iflib, 1, 1, 1); static device_method_t em_if_methods[] = { DEVMETHOD(ifdi_attach_pre, em_if_attach_pre), DEVMETHOD(ifdi_attach_post, em_if_attach_post), DEVMETHOD(ifdi_detach, em_if_detach), DEVMETHOD(ifdi_shutdown, em_if_shutdown), DEVMETHOD(ifdi_suspend, em_if_suspend), DEVMETHOD(ifdi_resume, em_if_resume), DEVMETHOD(ifdi_init, em_if_init), DEVMETHOD(ifdi_stop, em_if_stop), DEVMETHOD(ifdi_msix_intr_assign, em_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, em_if_enable_intr), DEVMETHOD(ifdi_intr_disable, em_if_disable_intr), DEVMETHOD(ifdi_tx_queues_alloc, em_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, em_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, em_if_queues_free), DEVMETHOD(ifdi_update_admin_status, em_if_update_admin_status), DEVMETHOD(ifdi_multi_set, em_if_multi_set), DEVMETHOD(ifdi_media_status, em_if_media_status), DEVMETHOD(ifdi_media_change, em_if_media_change), DEVMETHOD(ifdi_mtu_set, em_if_mtu_set), DEVMETHOD(ifdi_promisc_set, em_if_set_promisc), DEVMETHOD(ifdi_timer, em_if_timer), DEVMETHOD(ifdi_vlan_register, em_if_vlan_register), DEVMETHOD(ifdi_vlan_unregister, em_if_vlan_unregister), DEVMETHOD(ifdi_get_counter, em_if_get_counter), DEVMETHOD(ifdi_led_func, em_if_led_func), DEVMETHOD(ifdi_queue_intr_enable, em_if_queue_intr_enable), DEVMETHOD_END }; /* * note that if (adapter->msix_mem) is replaced by: * if (adapter->intr_type == IFLIB_INTR_MSIX) */ static driver_t em_if_driver = { "em_if", em_if_methods, sizeof(struct adapter) }; /********************************************************************* * Tunable default values. 
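As a sanity check on the conversion macros defined just below: the delay registers count in ~1.024 us hardware ticks, the ITR register counts in 256 ns units, and the +500/+512 terms provide rounding. A standalone check of the arithmetic (the tick value 64 is only a sample input, not a value taken from this driver):

#include <assert.h>

#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
#define MAX_INTS_PER_SEC 8000
#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))

int
main(void)
{
        /* 64 ticks of ~1.024 us round to 66 us, and back again to 64 ticks. */
        assert(EM_TICKS_TO_USECS(64) == 66);
        assert(EM_USECS_TO_TICKS(66) == 64);
        /* 8000 interrupts/s expressed in 256 ns ITR units gives a register value of 488. */
        assert(DEFAULT_ITR == 488);
        return (0);
}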
*********************************************************************/ #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) #define M_TSO_LEN 66 #define MAX_INTS_PER_SEC 8000 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256)) /* Allow common code without TSO */ #ifndef CSUM_TSO #define CSUM_TSO 0 #endif #define TSO_WORKAROUND 4 static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters"); static int em_disable_crc_stripping = 0; SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN, &em_disable_crc_stripping, 0, "Disable CRC Stripping"); static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV); static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR); SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt, 0, "Default transmit interrupt delay in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt, 0, "Default receive interrupt delay in usecs"); static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV); static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV); SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN, &em_tx_abs_int_delay_dflt, 0, "Default transmit interrupt delay limit in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN, &em_rx_abs_int_delay_dflt, 0, "Default receive interrupt delay limit in usecs"); static int em_smart_pwr_down = FALSE; SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down, 0, "Set to true to leave smart power down enabled on newer adapters"); /* Controls whether promiscuous also shows bad packets */ static int em_debug_sbp = TRUE; SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0, "Show bad packets in promiscuous mode"); /* How many packets rxeof tries to clean at a time */ static int em_rx_process_limit = 100; SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &em_rx_process_limit, 0, "Maximum number of received packets to process " "at a time, -1 means unlimited"); /* Energy efficient ethernet - default to OFF */ static int eee_setting = 1; SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0, "Enable Energy Efficient Ethernet"); /* ** Tuneable Interrupt rate */ static int em_max_interrupt_rate = 8000; SYSCTL_INT(_hw_em, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &em_max_interrupt_rate, 0, "Maximum interrupts per second"); /* Global used in WOL setup with multiport cards */ static int global_quad_port_a = 0; extern struct if_txrx igb_txrx; extern struct if_txrx em_txrx; extern struct if_txrx lem_txrx; static struct if_shared_ctx em_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = EM_TSO_SIZE, .isc_tx_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = MJUM9BYTES, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = MJUM9BYTES, .isc_nfl = 1, .isc_nrxqs = 1, .isc_ntxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = em_vendor_info_array, .isc_driver_version = em_driver_version, .isc_driver = &em_if_driver, .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP, .isc_nrxd_min = {EM_MIN_RXD}, .isc_ntxd_min = {EM_MIN_TXD}, .isc_nrxd_max = {EM_MAX_RXD}, .isc_ntxd_max = {EM_MAX_TXD}, .isc_nrxd_default = {EM_DEFAULT_RXD}, .isc_ntxd_default = {EM_DEFAULT_TXD}, }; if_shared_ctx_t em_sctx = &em_sctx_init; static struct if_shared_ctx igb_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = EM_TSO_SIZE, .isc_tx_maxsegsize = 
PAGE_SIZE,
        .isc_rx_maxsize = MJUM9BYTES,
        .isc_rx_nsegments = 1,
        .isc_rx_maxsegsize = MJUM9BYTES,
        .isc_nfl = 1,
        .isc_nrxqs = 1,
        .isc_ntxqs = 1,
        .isc_admin_intrcnt = 1,
        .isc_vendor_info = igb_vendor_info_array,
        .isc_driver_version = em_driver_version,
        .isc_driver = &em_if_driver,
        .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP,

        .isc_nrxd_min = {EM_MIN_RXD},
        .isc_ntxd_min = {EM_MIN_TXD},
        .isc_nrxd_max = {EM_MAX_RXD},
        .isc_ntxd_max = {EM_MAX_TXD},
        .isc_nrxd_default = {EM_DEFAULT_RXD},
        .isc_ntxd_default = {EM_DEFAULT_TXD},
};

if_shared_ctx_t igb_sctx = &igb_sctx_init;

/*****************************************************************
 *
 * Dump Registers
 *
 ****************************************************************/
#define IGB_REGS_LEN 739

static int
em_get_regs(SYSCTL_HANDLER_ARGS)
{
        struct adapter *adapter = (struct adapter *)arg1;
        struct e1000_hw *hw = &adapter->hw;
        struct sbuf *sb;
        u32 *regs_buff;
        int rc;

        regs_buff = (u32 *)malloc(sizeof(u32) * IGB_REGS_LEN, M_DEVBUF, M_NOWAIT);
        if (regs_buff == NULL)
                return (ENOMEM);
        memset(regs_buff, 0, IGB_REGS_LEN * sizeof(u32));

        rc = sysctl_wire_old_buffer(req, 0);
        MPASS(rc == 0);
        if (rc != 0) {
                free(regs_buff, M_DEVBUF);
                return (rc);
        }

        sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req);
        MPASS(sb != NULL);
        if (sb == NULL) {
                free(regs_buff, M_DEVBUF);
                return (ENOMEM);
        }

        /* General Registers */
        regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
        regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
        regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
        regs_buff[3] = E1000_READ_REG(hw, E1000_ICR);
        regs_buff[4] = E1000_READ_REG(hw, E1000_RCTL);
        regs_buff[5] = E1000_READ_REG(hw, E1000_RDLEN(0));
        regs_buff[6] = E1000_READ_REG(hw, E1000_RDH(0));
        regs_buff[7] = E1000_READ_REG(hw, E1000_RDT(0));
        regs_buff[8] = E1000_READ_REG(hw, E1000_RXDCTL(0));
        regs_buff[9] = E1000_READ_REG(hw, E1000_RDBAL(0));
        regs_buff[10] = E1000_READ_REG(hw, E1000_RDBAH(0));
        regs_buff[11] = E1000_READ_REG(hw, E1000_TCTL);
        regs_buff[12] = E1000_READ_REG(hw, E1000_TDBAL(0));
        regs_buff[13] = E1000_READ_REG(hw, E1000_TDBAH(0));
        regs_buff[14] = E1000_READ_REG(hw, E1000_TDLEN(0));
        regs_buff[15] = E1000_READ_REG(hw, E1000_TDH(0));
        regs_buff[16] = E1000_READ_REG(hw, E1000_TDT(0));
        regs_buff[17] = E1000_READ_REG(hw, E1000_TXDCTL(0));
        regs_buff[18] = E1000_READ_REG(hw, E1000_TDFH);
        regs_buff[19] = E1000_READ_REG(hw, E1000_TDFT);
        regs_buff[20] = E1000_READ_REG(hw, E1000_TDFHS);
        regs_buff[21] = E1000_READ_REG(hw, E1000_TDFPC);

        sbuf_printf(sb, "General Registers\n");
        sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]);
        sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]);
        sbuf_printf(sb, "\tCTRL_EXT\t %08x\n\n", regs_buff[2]);

        sbuf_printf(sb, "Interrupt Registers\n");
        sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]);

        sbuf_printf(sb, "RX Registers\n");
        sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]);
        sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]);
        sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]);
        sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]);
        sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]);
        sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]);
        sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]);

        sbuf_printf(sb, "TX Registers\n");
        sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]);
        sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]);
        sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]);
        sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]);
        sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]);
        sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]);
        sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]);
        sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]);
        sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]);
        sbuf_printf(sb, "\tTDFHS\t 
%08x\n", regs_buff[20]); sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]); #ifdef DUMP_DESCS { if_softc_ctx_t scctx = adapter->shared; struct rx_ring *rxr = &rx_que->rxr; struct tx_ring *txr = &tx_que->txr; int ntxd = scctx->isc_ntxd[0]; int nrxd = scctx->isc_nrxd[0]; int j; for (j = 0; j < nrxd; j++) { u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error); u32 length = le32toh(rxr->rx_base[j].wb.upper.length); sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length); } for (j = 0; j < min(ntxd, 256); j++) { struct em_txbuffer *buf = &txr->tx_buffers[j]; unsigned int *ptr = (unsigned int *)&txr->tx_base[j]; sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n", j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop, buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & E1000_TXD_STAT_DD : 0); } } #endif rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } static void * em_register(device_t dev) { return (em_sctx); } static void * igb_register(device_t dev) { return (igb_sctx); } static void em_init_tx_ring(struct em_tx_queue *que) { struct adapter *sc = que->adapter; if_softc_ctx_t scctx = sc->shared; struct tx_ring *txr = &que->txr; struct em_txbuffer *tx_buffer; tx_buffer = txr->tx_buffers; for (int i = 0; i < scctx->isc_ntxd[0]; i++, tx_buffer++) { tx_buffer->eop = -1; } } static int em_set_num_queues(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); int maxqueues; /* Sanity check based on HW */ switch (adapter->hw.mac.type) { case e1000_82576: case e1000_82580: case e1000_i350: case e1000_i354: maxqueues = 8; break; case e1000_i210: case e1000_82575: maxqueues = 4; break; case e1000_i211: case e1000_82574: maxqueues = 2; break; default: maxqueues = 1; break; } return (maxqueues); } #define EM_CAPS \ IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \ IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | \ IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; #define IGB_CAPS \ IFCAP_TSO4 | IFCAP_TXCSUM | IFCAP_LRO | IFCAP_RXCSUM | IFCAP_VLAN_HWFILTER | IFCAP_WOL_MAGIC | \ IFCAP_WOL_MCAST | IFCAP_WOL | IFCAP_VLAN_HWTSO | IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \ IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_TXCSUM_IPV6 | IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU; /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int em_if_attach_pre(if_ctx_t ctx) { struct adapter *adapter; if_softc_ctx_t scctx; device_t dev; struct e1000_hw *hw; int error = 0; INIT_DEBUGOUT("em_if_attach_pre begin"); dev = iflib_get_dev(ctx); adapter = iflib_get_softc(ctx); if (resource_disabled("em", device_get_unit(dev))) { device_printf(dev, "Disabled by device hint\n"); return (ENXIO); } adapter->ctx = ctx; adapter->dev = adapter->osdep.dev = dev; scctx = adapter->shared = iflib_get_softc_ctx(ctx); adapter->media = iflib_get_media(ctx); hw = &adapter->hw; adapter->tx_process_limit = scctx->isc_ntxd[0]; /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_nvm_info, "I", "NVM Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_set_flowcntl, "I", "Flow Control"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "reg_dump", CTLTYPE_STRING | CTLFLAG_RD, adapter, 0, em_get_regs, "A", "Dump Registers"); /* Determine hardware and mac info */ em_identify_hardware(ctx); /* Set isc_msix_bar */ scctx->isc_msix_bar = PCIR_BAR(EM_MSIX_BAR); scctx->isc_tx_nsegments = EM_MAX_SCATTER; scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; scctx->isc_tx_tso_size_max = EM_TSO_SIZE; scctx->isc_tx_tso_segsize_max = EM_TSO_SEG_SIZE; scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = em_set_num_queues(ctx); device_printf(dev, "attach_pre capping queues at %d\n", scctx->isc_ntxqsets_max); scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO; if (adapter->hw.mac.type >= igb_mac_min) { int try_second_bar; scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN); scctx->isc_txrx = &igb_txrx; scctx->isc_capenable = IGB_CAPS; scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP \ | CSUM_IP6_UDP | CSUM_IP6_TCP; if (adapter->hw.mac.type != e1000_82575) scctx->isc_tx_csum_flags |= CSUM_SCTP | CSUM_IP6_SCTP; /* ** Some new devices, as with ixgbe, now may ** use a different BAR, so we need to keep ** track of which is used. 
*/ try_second_bar = pci_read_config(dev, scctx->isc_msix_bar, 4); if (try_second_bar == 0) scctx->isc_msix_bar += 4; } else if (adapter->hw.mac.type >= em_mac_min) { scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]* sizeof(struct e1000_tx_desc), EM_DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN); scctx->isc_txrx = &em_txrx; scctx->isc_capenable = EM_CAPS; scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO; } else { scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN); scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO; scctx->isc_txrx = &lem_txrx; scctx->isc_capenable = EM_CAPS; if (adapter->hw.mac.type < e1000_82543) scctx->isc_capenable &= ~(IFCAP_HWCSUM|IFCAP_VLAN_HWCSUM); scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO; scctx->isc_msix_bar = 0; } /* Setup PCI resources */ if (em_allocate_pci_resources(ctx)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* ** For ICH8 and family we need to ** map the flash memory, and this ** must happen after the MAC is ** identified */ if ((hw->mac.type == e1000_ich8lan) || (hw->mac.type == e1000_ich9lan) || (hw->mac.type == e1000_ich10lan) || (hw->mac.type == e1000_pchlan) || (hw->mac.type == e1000_pch2lan) || (hw->mac.type == e1000_pch_lpt)) { int rid = EM_BAR_TYPE_FLASH; adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->flash == NULL) { device_printf(dev, "Mapping of Flash failed\n"); error = ENXIO; goto err_pci; } /* This is used in the shared code */ hw->flash_address = (u8 *)adapter->flash; adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash); adapter->osdep.flash_bus_space_handle = rman_get_bushandle(adapter->flash); } /* ** In the new SPT device flash is not a ** separate BAR, rather it is also in BAR0, ** so use the same tag and an offset handle for the ** FLASH read/write macros in the shared code. 
*/ else if (hw->mac.type == e1000_pch_spt) { adapter->osdep.flash_bus_space_tag = adapter->osdep.mem_bus_space_tag; adapter->osdep.flash_bus_space_handle = adapter->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR; } /* Do Shared Code initialization */ error = e1000_setup_init_funcs(hw, TRUE); if (error) { device_printf(dev, "Setup of Shared code failed, error %d\n", error); error = ENXIO; goto err_pci; } em_setup_msix(ctx); e1000_get_bus_info(hw); /* Set up some sysctls for the tunable interrupt delays */ em_add_int_delay_sysctl(adapter, "rx_int_delay", "receive interrupt delay in usecs", &adapter->rx_int_delay, E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_int_delay", "transmit interrupt delay in usecs", &adapter->tx_int_delay, E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "rx_abs_int_delay", "receive interrupt delay limit in usecs", &adapter->rx_abs_int_delay, E1000_REGISTER(hw, E1000_RADV), em_rx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_abs_int_delay", "transmit interrupt delay limit in usecs", &adapter->tx_abs_int_delay, E1000_REGISTER(hw, E1000_TADV), em_tx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "itr", "interrupt delay limit in usecs/4", &adapter->tx_itr, E1000_REGISTER(hw, E1000_ITR), DEFAULT_ITR); /* Sysctl for limiting the amount of work done in the taskqueue */ em_set_sysctl_value(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, em_rx_process_limit); hw->mac.autoneg = DO_AUTO_NEG; hw->phy.autoneg_wait_to_complete = FALSE; hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; if (adapter->hw.mac.type < em_mac_min) { e1000_init_script_state_82541(&adapter->hw, TRUE); e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE); } /* Copper options */ if (hw->phy.media_type == e1000_media_type_copper) { hw->phy.mdix = AUTO_ALL_MODES; hw->phy.disable_polarity_correction = FALSE; hw->phy.ms_type = EM_MASTER_SLAVE; } /* * Set the frame limits assuming * standard ethernet sized frames. */ adapter->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; /* * This controls when hardware reports transmit completion * status. */ hw->mac.report_tx_early = 1; /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Check SOL/IDER usage */ if (e1000_check_reset_block(hw)) device_printf(dev, "PHY reset is blocked" " due to SOL/IDER session.\n"); /* Sysctl for setting Energy Efficient Ethernet */ hw->dev_spec.ich8lan.eee_disable = eee_setting; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "eee_control", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_eee, "I", "Disable Energy Efficient Ethernet"); /* ** Start from a known state, this is ** important in reading the nvm and ** mac from that. */ e1000_reset_hw(hw); /* Make sure we have a good EEPROM before we read from it */ if (e1000_validate_nvm_checksum(hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, call it again, ** if it fails a second time its a real issue. 
*/ if (e1000_validate_nvm_checksum(hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } if (!em_is_valid_ether_addr(hw->mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* Disable ULP support */ e1000_disable_ulp_lpt_lp(hw, TRUE); /* * Get Wake-on-Lan and Management info for later use */ em_get_wakeup(ctx); iflib_set_mac(ctx, hw->mac.addr); return (0); err_late: em_release_hw_control(adapter); err_pci: em_free_pci_resources(ctx); free(adapter->mta, M_DEVBUF); return (error); } static int em_if_attach_post(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct e1000_hw *hw = &adapter->hw; int error = 0; /* Setup OS specific network interface */ error = em_setup_interface(ctx); if (error != 0) { goto err_late; } em_reset(ctx); /* Initialize statistics */ em_update_stats_counters(adapter); hw->mac.get_link_status = 1; em_if_update_admin_status(ctx); em_add_hw_stats(adapter); /* Non-AMT based hardware can now take control from firmware */ if (adapter->has_manage && !adapter->has_amt) em_get_hw_control(adapter); INIT_DEBUGOUT("em_if_attach_post: end"); return (error); err_late: em_release_hw_control(adapter); em_free_pci_resources(ctx); em_if_queues_free(ctx); free(adapter->mta, M_DEVBUF); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int em_if_detach(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("em_detach: begin"); e1000_phy_hw_reset(&adapter->hw); em_release_manageability(adapter); em_release_hw_control(adapter); em_free_pci_resources(ctx); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int em_if_shutdown(if_ctx_t ctx) { return em_if_suspend(ctx); } /* * Suspend/resume device methods. 
*/ static int em_if_suspend(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); em_release_manageability(adapter); em_release_hw_control(adapter); em_enable_wakeup(ctx); return (0); } static int em_if_resume(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if (adapter->hw.mac.type == e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); em_if_init(ctx); em_init_manageability(adapter); return(0); } static int em_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { int max_frame_size; struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); switch (adapter->hw.mac.type) { case e1000_82571: case e1000_82572: case e1000_ich9lan: case e1000_ich10lan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: case e1000_82574: case e1000_82583: case e1000_80003es2lan: /* 9K Jumbo Frame size */ max_frame_size = 9234; break; case e1000_pchlan: max_frame_size = 4096; break; /* Adapters that do not support jumbo frames */ case e1000_ich8lan: max_frame_size = ETHER_MAX_LEN; break; default: max_frame_size = MAX_JUMBO_FRAME_SIZE; } if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { return (EINVAL); } adapter->hw.mac.max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; return (0); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void em_if_init(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); INIT_DEBUGOUT("em_if_init: begin"); /* Get the latest mac address, User can use a LAA */ bcopy(if_getlladdr(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* * With the 82571 adapter, RAR[0] may be overwritten * when the other port is reset, we make a duplicate * in RAR[14] for that eventuality, this assures * the interface continues to function. */ if (adapter->hw.mac.type == e1000_82571) { e1000_set_laa_state_82571(&adapter->hw, TRUE); e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, E1000_RAR_ENTRIES - 1); } /* Initialize the hardware */ em_reset(ctx); em_if_update_admin_status(ctx); /* Setup VLAN support, basic and offload if available */ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); /* Clear bad data from Rx FIFOs */ if (adapter->hw.mac.type >= igb_mac_min) e1000_rx_fifo_flush_82575(&adapter->hw); /* Configure for OS presence */ em_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ em_initialize_transmit_unit(ctx); /* Setup Multicast table */ em_if_multi_set(ctx); /* ** Figure out the desired mbuf ** pool for doing jumbos */ if (adapter->hw.mac.max_frame_size <= 2048) adapter->rx_mbuf_sz = MCLBYTES; else if (adapter->hw.mac.max_frame_size <= 4096) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MJUM9BYTES; em_initialize_receive_unit(ctx); /* Use real VLAN Filter support? 
*/ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) /* Use real VLAN Filter support */ em_setup_vlan_hw_support(adapter); else { u32 ctrl; ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); } } /* Don't lose promiscuous settings */ em_if_set_promisc(ctx, IFF_PROMISC); e1000_clear_hw_cntrs_base_generic(&adapter->hw); /* MSI/X configuration for 82574 */ if (adapter->hw.mac.type == e1000_82574) { int tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp); /* Set the IVAR - interrupt vector routing. */ E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars); } else if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */ igb_configure_queues(adapter); /* this clears any pending interrupts */ E1000_READ_REG(&adapter->hw, E1000_ICR); E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC); /* AMT based hardware can now take control from firmware */ if (adapter->has_manage && adapter->has_amt) em_get_hw_control(adapter); /* Set Energy Efficient Ethernet */ if (adapter->hw.mac.type >= igb_mac_min && adapter->hw.phy.media_type == e1000_media_type_copper) { if (adapter->hw.mac.type == e1000_i354) e1000_set_eee_i354(&adapter->hw, TRUE, TRUE); else e1000_set_eee_i350(&adapter->hw, TRUE, TRUE); } } /********************************************************************* * * Fast Legacy/MSI Combined Interrupt Service routine * *********************************************************************/ int em_intr(void *arg) { struct adapter *adapter = arg; if_ctx_t ctx = adapter->ctx; u32 reg_icr; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (adapter->intr_type != IFLIB_INTR_LEGACY) goto skip_stray; /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; /* * Starting with the 82571 chip, bit 31 should be used to * determine whether the interrupt belongs to us. 
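The legacy path of em_intr applies three stray-interrupt tests in sequence (two above, plus the bit-31 test that follows). Consolidated into a single predicate they read as follows (a hypothetical helper assuming this driver's headers; the real code keeps the checks inline):

/*
 * Hypothetical consolidation of em_intr()'s stray checks: all-ones
 * means the card was hot-ejected, zero means another device raised
 * the shared line, and on 82571 and later a clear INT_ASSERTED bit
 * also means the interrupt is not ours.
 */
static bool
em_icr_is_ours(struct adapter *adapter, u32 reg_icr)
{
        if (reg_icr == 0xffffffff || reg_icr == 0)
                return (false);
        if (adapter->hw.mac.type >= e1000_82571 &&
            (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
                return (false);
        return (true);
}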
*/ if (adapter->hw.mac.type >= e1000_82571 && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; skip_stray: /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; iflib_admin_intr_deferred(ctx); } if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return (FILTER_SCHEDULE_THREAD); } static void igb_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq) { E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxq->eims); } static void em_enable_queue(struct adapter *adapter, struct em_rx_queue *rxq) { E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxq->eims); } static int em_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct adapter *adapter = iflib_get_softc(ctx); struct em_rx_queue *rxq = &adapter->rx_queues[rxqid]; if (adapter->hw.mac.type >= igb_mac_min) igb_enable_queue(adapter, rxq); else em_enable_queue(adapter, rxq); return (0); } /********************************************************************* * * MSIX RX Interrupt Service routine * **********************************************************************/ static int em_msix_que(void *arg) { struct em_rx_queue *que = arg; ++que->irqs; return (FILTER_SCHEDULE_THREAD); } /********************************************************************* * * MSIX Link Fast Interrupt Service routine * **********************************************************************/ static int em_msix_link(void *arg) { struct adapter *adapter = arg; u32 reg_icr; ++adapter->link_irq; MPASS(adapter->hw.back != NULL); reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { em_handle_link(adapter->ctx); } else { E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); if (adapter->hw.mac.type >= igb_mac_min) E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask); } /* ** Because we must read the ICR for this interrupt ** it may clear other causes using autoclear, for ** this reason we simply create a soft interrupt ** for all these vectors. */ if (reg_icr && adapter->hw.mac.type < igb_mac_min) { E1000_WRITE_REG(&adapter->hw, E1000_ICS, adapter->ims); } return (FILTER_HANDLED); } static void em_handle_link(void *context) { if_ctx_t ctx = context; struct adapter *adapter = iflib_get_softc(ctx); adapter->hw.mac.get_link_status = 1; iflib_admin_intr_deferred(ctx); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. 
* **********************************************************************/ static void em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct adapter *adapter = iflib_get_softc(ctx); u_char fiber_type = IFM_1000_SX; INIT_DEBUGOUT("em_if_media_status: begin"); iflib_admin_intr_deferred(ctx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { return; } ifmr->ifm_status |= IFM_ACTIVE; if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { if (adapter->hw.mac.type == e1000_82545) fiber_type = IFM_1000_LX; ifmr->ifm_active |= fiber_type | IFM_FDX; } else { switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. * **********************************************************************/ static int em_if_media_change(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("em_if_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } em_if_init(ctx); return (0); } static int em_if_set_promisc(if_ctx_t ctx, int flags) { struct adapter *adapter = iflib_get_softc(ctx); u32 reg_rctl; em_disable_promisc(ctx); reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (flags & IFF_PROMISC) { reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (em_debug_sbp) reg_rctl |= E1000_RCTL_SBP; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else if (flags & IFF_ALLMULTI) { reg_rctl |= E1000_RCTL_MPE; reg_rctl &= ~E1000_RCTL_UPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } return (0); } static void em_disable_promisc(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); u32 reg_rctl; int mcnt = 0; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= (~E1000_RCTL_UPE); if (if_getflags(ifp) & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else mcnt = if_multiaddr_count(ifp, MAX_NUM_MULTICAST_ADDRESSES); /* Don't disable if in MAX groups */ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) reg_rctl &= (~E1000_RCTL_MPE); reg_rctl &= (~E1000_RCTL_SBP); 
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void em_if_multi_set(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); u32 reg_rctl = 0; u8 *mta; /* Multicast array memory */ int mcnt = 0; IOCTL_DEBUGOUT("em_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_clear_mwi(&adapter->hw); reg_rctl |= E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); } if_multiaddr_array(ifp, mta, &mcnt, MAX_NUM_MULTICAST_ADDRESSES); if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= ~E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(&adapter->hw); } } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. * **********************************************************************/ static void em_if_timer(if_ctx_t ctx, uint16_t qid) { struct adapter *adapter = iflib_get_softc(ctx); struct em_rx_queue *que; int i; int trigger = 0; em_if_update_admin_status(ctx); em_update_stats_counters(adapter); /* Reset LAA into RAR[0] on 82571 */ if ((adapter->hw.mac.type == e1000_82571) && e1000_get_laa_state_82571(&adapter->hw)) e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); if (adapter->hw.mac.type < em_mac_min) lem_smartspeed(adapter); /* Mask to use in the irq trigger */ if (adapter->intr_type == IFLIB_INTR_MSIX) { for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) trigger |= que->eims; } else { trigger = E1000_ICS_RXDMT0; } } static void em_if_update_admin_status(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = iflib_get_ifp(ctx); device_t dev = iflib_get_dev(ctx); u32 link_check = 0; /* Get the cached link value or read phy for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { if (hw->mac.type == e1000_pch_spt) msec_delay(50); /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; if (link_check) /* ESB2 fix */ e1000_cfg_on_link_up(hw); } else { link_check = TRUE; } break; case e1000_media_type_fiber: e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; default: case e1000_media_type_unknown: break; } /* Now check for a transition */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(hw, &adapter->link_speed, 
&adapter->link_duplex); /* Check if we must disable SPEED_MODE bit on PCI-E */ if ((adapter->link_speed != SPEED_1000) && ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572))) { int tarc0; tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); tarc0 &= ~TARC_SPEED_MODE_BIT; E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); } if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? "Full Duplex" : "Half Duplex")); adapter->link_active = 1; adapter->smartspeed = 0; if_setbaudrate(ifp, adapter->link_speed * 1000000); iflib_link_state_change(ctx, LINK_STATE_UP, ifp->if_baudrate); printf("Link state changed to up\n"); } else if (!link_check && (adapter->link_active == 1)) { if_setbaudrate(ifp, 0); adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); adapter->link_active = 0; iflib_link_state_change(ctx, LINK_STATE_DOWN, ifp->if_baudrate); printf("link state changed to down\n"); } E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * * This routine should always be called with BOTH the CORE * and TX locks. **********************************************************************/ static void em_if_stop(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("em_stop: begin"); e1000_reset_hw(&adapter->hw); if (adapter->hw.mac.type >= e1000_82544) - E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); + E1000_WRITE_REG(&adapter->hw, E1000_WUFC, 0); e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } /********************************************************************* * * Determine hardware revision. * **********************************************************************/ static void em_identify_hardware(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct adapter *adapter = iflib_get_softc(ctx); /* Make sure our PCI config space has the necessary stuff set */ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Do Shared Code Init and Setup */ if (e1000_set_mac_type(&adapter->hw)) { device_printf(dev, "Setup init failure\n"); return; } } static int em_allocate_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int rid, val; rid = PCIR_BAR(0); adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->memory == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->memory); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; /* Only older adapters use IO mapping */ if (adapter->hw.mac.type < em_mac_min && adapter->hw.mac.type > e1000_82543) { /* Figure our where our IO BAR is ? 
*/ for (rid = PCIR_BAR(0); rid < PCIR_CIS;) { val = pci_read_config(dev, rid, 4); if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) { adapter->io_rid = rid; break; } rid += 4; /* check for 64bit BAR */ if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT) rid += 4; } if (rid >= PCIR_CIS) { device_printf(dev, "Unable to locate IO BAR\n"); return (ENXIO); } adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE); if (adapter->ioport == NULL) { device_printf(dev, "Unable to allocate bus resource: " "ioport\n"); return (ENXIO); } adapter->hw.io_base = 0; adapter->osdep.io_bus_space_tag = rman_get_bustag(adapter->ioport); adapter->osdep.io_bus_space_handle = rman_get_bushandle(adapter->ioport); } adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Setup the MSIX Interrupt handlers * **********************************************************************/ static int em_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct adapter *adapter = iflib_get_softc(ctx); struct em_rx_queue *rx_que = adapter->rx_queues; struct em_tx_queue *tx_que = adapter->tx_queues; int error, rid, i, vector = 0; char buf[16]; /* First set up ring resources */ for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) { rid = vector +1; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RX, em_msix_que, rx_que, rx_que->me, buf); if (error) { device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error); adapter->rx_num_queues = i + 1; goto fail; } rx_que->msix = vector; /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 20 and 21 ** are for RX0 and RX1, note this has ** NOTHING to do with the MSIX vector */ if (adapter->hw.mac.type == e1000_82574) { rx_que->eims = 1 << (20 + i); adapter->ims |= rx_que->eims; adapter->ivars |= (8 | rx_que->msix) << (i * 4); } else if (adapter->hw.mac.type == e1000_82575) rx_que->eims = E1000_EICR_TX_QUEUE0 << vector; else rx_que->eims = 1 << vector; } for (i = 0; i < adapter->tx_num_queues; i++, tx_que++) { rid = vector + 1; snprintf(buf, sizeof(buf), "txq%d", i); tx_que = &adapter->tx_queues[i]; iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->me, buf); tx_que->msix = vector; /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 22 and 23 ** are for TX0 and TX1, note this has ** NOTHING to do with the MSIX vector */ if (adapter->hw.mac.type < igb_mac_min) { tx_que->eims = 1 << (22 + i); adapter->ims |= tx_que->eims; adapter->ivars |= (8 | tx_que->msix) << (8 + (i * 4)); } if (adapter->hw.mac.type == e1000_82575) tx_que->eims = E1000_EICR_TX_QUEUE0 << (i % adapter->tx_num_queues); else tx_que->eims = 1 << (i % adapter->tx_num_queues); } /* Link interrupt */ rid = vector + 1; error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register admin handler"); goto fail; } adapter->linkvec = vector; if (adapter->hw.mac.type < igb_mac_min) { adapter->ivars |= (8 | vector) << 16; adapter->ivars |= 0x80000000; } return (0); fail: iflib_irq_free(ctx, &adapter->irq); rx_que = adapter->rx_queues; for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (error); } static void igb_configure_queues(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct em_rx_queue *rx_que; struct em_tx_queue *tx_que; u32 tmp, ivar 
= 0, newitr = 0; /* First turn on RSS capability */ if (adapter->hw.mac.type != e1000_82575) E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME | E1000_GPIE_PBA | E1000_GPIE_NSICR); /* Turn on MSIX */ switch (adapter->hw.mac.type) { case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: case e1000_vfadapt: case e1000_vfadapt_i350: /* RX entries */ for (int i = 0; i < adapter->rx_num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); rx_que = &adapter->rx_queues[i]; if (i & 1) { ivar &= 0xFF00FFFF; ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16; } else { ivar &= 0xFFFFFF00; ivar |= rx_que->msix | E1000_IVAR_VALID; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); } /* TX entries */ for (int i = 0; i < adapter->tx_num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); tx_que = &adapter->tx_queues[i]; if (i & 1) { ivar &= 0x00FFFFFF; ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24; } else { ivar &= 0xFFFF00FF; ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= tx_que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82576: /* RX entries */ for (int i = 0; i < adapter->rx_num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); rx_que = &adapter->rx_queues[i]; if (i < 8) { ivar &= 0xFFFFFF00; ivar |= rx_que->msix | E1000_IVAR_VALID; } else { ivar &= 0xFF00FFFF; ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= rx_que->eims; } /* TX entries */ for (int i = 0; i < adapter->tx_num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); tx_que = &adapter->tx_queues[i]; if (i < 8) { ivar &= 0xFFFF00FF; ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8; } else { ivar &= 0x00FFFFFF; ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= tx_que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82575: /* enable MSI-X support*/ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask interrupts upon ICR read. 
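The per-queue MSIXBM writes in the loop below then bind each queue's RX and TX causes to a single vector, and the link vector is steered to E1000_EIMS_OTHER.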
*/ tmp |= E1000_CTRL_EXT_EIAME; tmp |= E1000_CTRL_EXT_IRCA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); /* Queues */ for (int i = 0; i < adapter->rx_num_queues; i++) { rx_que = &adapter->rx_queues[i]; tmp = E1000_EICR_RX_QUEUE0 << i; tmp |= E1000_EICR_TX_QUEUE0 << i; rx_que->eims = tmp; E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), i, rx_que->eims); adapter->que_mask |= rx_que->eims; } /* Link */ E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec), E1000_EIMS_OTHER); adapter->link_mask |= E1000_EIMS_OTHER; default: break; } /* Set the starting interrupt rate */ if (em_max_interrupt_rate > 0) newitr = (4000000 / em_max_interrupt_rate) & 0x7FFC; if (hw->mac.type == e1000_82575) newitr |= newitr << 16; else newitr |= E1000_EITR_CNT_IGNR; for (int i = 0; i < adapter->rx_num_queues; i++) { rx_que = &adapter->rx_queues[i]; E1000_WRITE_REG(hw, E1000_EITR(rx_que->msix), newitr); } return; } static void em_free_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct em_rx_queue *que = adapter->rx_queues; device_t dev = iflib_get_dev(ctx); /* Release all msix queue resources */ if (adapter->intr_type == IFLIB_INTR_MSIX) iflib_irq_free(ctx, &adapter->irq); for (int i = 0; i < adapter->rx_num_queues; i++, que++) { iflib_irq_free(ctx, &que->que_irq); } /* First release all the interrupt resources */ if (adapter->memory != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->memory); adapter->memory = NULL; } if (adapter->flash != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH, adapter->flash); adapter->flash = NULL; } if (adapter->ioport != NULL) bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, adapter->ioport); } /* Setup MSI or MSI/X */ static int em_setup_msix(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if (adapter->hw.mac.type == e1000_82574) { em_enable_vectors_82574(ctx); } return (0); } /********************************************************************* * * Initialize the hardware to a configuration * as specified by the adapter structure. 
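* (lem_smartspeed() directly below is a legacy-MAC helper invoked from
* em_if_timer(); the initialization itself is done by em_reset(), which
* follows it.)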
* **********************************************************************/ static void lem_smartspeed(struct adapter *adapter) { u16 phy_tmp; if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) || adapter->hw.mac.autoneg == 0 || (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) return; if (adapter->smartspeed == 0) { /* If Master/Slave config fault is asserted twice, * we assume back-to-back */ e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return; e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); if(phy_tmp & CR_1000T_MS_ENABLE) { phy_tmp &= ~CR_1000T_MS_ENABLE; e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); adapter->smartspeed++; if(adapter->hw.mac.autoneg && !e1000_copper_link_autoneg(&adapter->hw) && !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { phy_tmp |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); } } } return; } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { /* If still no link, perhaps using 2/3 pair cable */ e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); phy_tmp |= CR_1000T_MS_ENABLE; e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); if(adapter->hw.mac.autoneg && !e1000_copper_link_autoneg(&adapter->hw) && !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { phy_tmp |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); } } /* Restart process after EM_SMARTSPEED_MAX iterations */ if(adapter->smartspeed++ == EM_SMARTSPEED_MAX) adapter->smartspeed = 0; } static void em_reset(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); struct e1000_hw *hw = &adapter->hw; u16 rx_buffer_size; u32 pba; INIT_DEBUGOUT("em_reset: begin"); /* Set up smart power down as default off on newer adapters. */ if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572)) { u16 phy_tmp = 0; /* Speed up time to link by disabling smart power down. */ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp); phy_tmp &= ~IGP02E1000_PM_SPD; e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp); } /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. 
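* For example, the 82571/82572 cases below have 48K of total packet
* buffer, so writing E1000_PBA_32K leaves 16K for transmit.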
*/ switch (hw->mac.type) { /* Total Packet Buffer on these is 48K */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ break; case e1000_82573: /* 82573: Total Packet Buffer is 32K */ pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ break; case e1000_82574: case e1000_82583: pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ break; case e1000_ich8lan: pba = E1000_PBA_8K; break; case e1000_ich9lan: case e1000_ich10lan: /* Boost Receive side for jumbo frames */ if (adapter->hw.mac.max_frame_size > 4096) pba = E1000_PBA_14K; else pba = E1000_PBA_10K; break; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: pba = E1000_PBA_26K; break; default: if (adapter->hw.mac.max_frame_size > 8192) pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ else pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ } E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. Here we use an arbitrary value of 1500 which will * restart after one full frame is pulled from the buffer. There * could be several smaller frames in the buffer and if so they will * not trigger the XON until their total number reduces the buffer * by 1500. * - The pause time is fairly large at 1000 x 512ns = 512 usec. */ rx_buffer_size = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10 ); hw->fc.high_water = rx_buffer_size - roundup2(adapter->hw.mac.max_frame_size, 1024); hw->fc.low_water = hw->fc.high_water - 1500; if (adapter->fc) /* locally set flow control value? 
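If so it is honored below; otherwise full flow control is requested. For reference, with a 32K Rx PBA and a 1518-byte max frame the computation above gives high_water = 32768 - 2048 = 30720 and low_water = 30720 - 1500 = 29220.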
*/ hw->fc.requested_mode = adapter->fc; else hw->fc.requested_mode = e1000_fc_full; if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; else hw->fc.pause_time = EM_FC_PAUSE_TIME; hw->fc.send_xon = TRUE; /* Device specific overrides/settings */ switch (hw->mac.type) { case e1000_pchlan: /* Workaround: no TX flow ctrl for PCH */ hw->fc.requested_mode = e1000_fc_rx_pause; hw->fc.pause_time = 0xFFFF; /* override */ if (if_getmtu(ifp) > ETHERMTU) { hw->fc.high_water = 0x3500; hw->fc.low_water = 0x1500; } else { hw->fc.high_water = 0x5000; hw->fc.low_water = 0x3000; } hw->fc.refresh_time = 0x1000; break; case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: hw->fc.high_water = 0x5C20; hw->fc.low_water = 0x5048; hw->fc.pause_time = 0x0650; hw->fc.refresh_time = 0x0400; /* Jumbos need adjusted PBA */ if (if_getmtu(ifp) > ETHERMTU) E1000_WRITE_REG(hw, E1000_PBA, 12); else E1000_WRITE_REG(hw, E1000_PBA, 26); break; case e1000_ich9lan: case e1000_ich10lan: if (if_getmtu(ifp) > ETHERMTU) { hw->fc.high_water = 0x2800; hw->fc.low_water = hw->fc.high_water - 8; break; } /* else fall thru */ default: if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; break; } /* Issue a global reset */ e1000_reset_hw(hw); - E1000_WRITE_REG(hw, E1000_WUC, 0); + E1000_WRITE_REG(hw, E1000_WUFC, 0); em_disable_aspm(adapter); /* and a re-init */ if (e1000_init_hw(hw) < 0) { device_printf(dev, "Hardware Initialization Failed\n"); return; } E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); } #define RSSKEYLEN 10 static void em_initialize_rss_mapping(struct adapter *adapter) { uint8_t rss_key[4 * RSSKEYLEN]; uint32_t reta = 0; struct e1000_hw *hw = &adapter->hw; int i; /* * Configure RSS key */ arc4rand(rss_key, sizeof(rss_key), 0); for (i = 0; i < RSSKEYLEN; ++i) { uint32_t rssrk = 0; rssrk = EM_RSSRK_VAL(rss_key, i); E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk); } /* * Configure RSS redirect table in following fashion: * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] */ for (i = 0; i < sizeof(reta); ++i) { uint32_t q; q = (i % adapter->rx_num_queues) << 7; reta |= q << (8 * i); } for (i = 0; i < 32; ++i) E1000_WRITE_REG(hw, E1000_RETA(i), reta); E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q | E1000_MRQC_RSS_FIELD_IPV4_TCP | E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX | E1000_MRQC_RSS_FIELD_IPV6_EX | E1000_MRQC_RSS_FIELD_IPV6); } static void igb_initialize_rss_mapping(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; int i; int queue_id; u32 reta; u32 rss_key[10], mrqc, shift = 0; /* XXX? */ if (adapter->hw.mac.type == e1000_82575) shift = 6; /* * The redirection table controls which destination * queue each bucket redirects traffic to. * Each DWORD represents four queues, with the LSB * being the first queue in the DWORD. * * This just allocates buckets to queues using round-robin * allocation. * * NOTE: It Just Happens to line up with the default * RSS allocation method. */ /* Warning FM follows */ reta = 0; for (i = 0; i < 128; i++) { #ifdef RSS queue_id = rss_get_indirection_to_bucket(i); /* * If we have more queues than buckets, we'll * end up mapping buckets to a subset of the * queues. * * If we have more buckets than queues, we'll * end up instead assigning multiple buckets * to queues. * * Both are suboptimal, but we need to handle * the case so we don't go out of bounds * indexing arrays and such. 
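* For example, eight RSS buckets onto two queues maps buckets 0,2,4,6
* to queue 0 and buckets 1,3,5,7 to queue 1.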
*/ queue_id = queue_id % adapter->rx_num_queues; #else queue_id = (i % adapter->rx_num_queues); #endif /* Adjust if required */ queue_id = queue_id << shift; /* * The low 8 bits are for hash value (n+0); * The next 8 bits are for hash value (n+1), etc. */ reta = reta >> 8; reta = reta | ( ((uint32_t) queue_id) << 24); if ((i & 3) == 3) { E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); reta = 0; } } /* Now fill in hash table */ /* * MRQC: Multiple Receive Queues Command * Set queuing to RSS control, number depends on the device. */ mrqc = E1000_MRQC_ENABLE_RSS_8Q; #ifdef RSS /* XXX ew typecasting */ rss_getkey((uint8_t *) &rss_key); #else arc4rand(&rss_key, sizeof(rss_key), 0); #endif for (i = 0; i < 10; i++) E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key[i]); /* * Configure the RSS fields to hash upon. */ mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP); mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | E1000_MRQC_RSS_FIELD_IPV6_TCP); mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP | E1000_MRQC_RSS_FIELD_IPV6_UDP); mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); E1000_WRITE_REG(hw, E1000_MRQC, mrqc); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int em_setup_interface(if_ctx_t ctx) { struct ifnet *ifp = iflib_get_ifp(ctx); struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; uint64_t cap = 0; INIT_DEBUGOUT("em_setup_interface: begin"); /* TSO parameters */ ifp->if_hw_tsomax = IP_MAXPACKET; /* Take m_pullup(9)'s in em_xmit() w/ TSO into account. */ ifp->if_hw_tsomaxsegcount = EM_MAX_SCATTER - 5; ifp->if_hw_tsomaxsegsize = EM_TSO_SEG_SIZE; /* Single Queue */ if (adapter->tx_num_queues == 1) { if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1); if_setsendqready(ifp); } cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4; cap |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; /* * Tell the upper layer(s) we * support full VLAN capability */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapabilitiesbit(ifp, cap, 0); /* ** Don't turn this on by default: if vlans are ** created on another pseudo device (e.g. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the em driver you can ** enable this and get full hardware tag filtering. 
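** (It can be toggled at run time, e.g. with ifconfig's vlanhwfilter
** option.)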
*/ if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER,0); /* Enable only WOL MAGIC by default */ if (adapter->wol) { - if_setcapabilitiesbit(ifp, IFCAP_WOL, 0); - if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, 0); - } + if_setcapenablebit(ifp, IFCAP_WOL_MAGIC, + IFCAP_WOL_MCAST| IFCAP_WOL_UCAST); + } else { + if_setcapenablebit(ifp, 0, IFCAP_WOL_MAGIC | + IFCAP_WOL_MCAST| IFCAP_WOL_UCAST); + } /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { u_char fiber_type = IFM_1000_SX; /* default type */ if (adapter->hw.mac.type == e1000_82545) fiber_type = IFM_1000_LX; ifmedia_add(adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | fiber_type, 0, NULL); } else { ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); return (0); } static int em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; int error = E1000_SUCCESS; struct em_tx_queue *que; int i; MPASS(adapter->tx_num_queues > 0); MPASS(adapter->tx_num_queues == ntxqsets); /* First allocate the top level queue structs */ if (!(adapter->tx_queues = (struct em_tx_queue *) malloc(sizeof(struct em_tx_queue) * adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); return(ENOMEM); } for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) { /* Set up some basics */ struct tx_ring *txr = &que->txr; txr->adapter = que->adapter = adapter; txr->que = que; que->me = txr->me = i; /* Allocate transmit buffer memory */ if (!(txr->tx_buffers = (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "failed to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* get the virtual and physical address of the hardware queues */ txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs]; txr->tx_paddr = paddrs[i*ntxqs]; } device_printf(iflib_get_dev(ctx), "allocated for %d tx_queues\n", adapter->tx_num_queues); return (0); fail: em_if_queues_free(ctx); return (error); } static int em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct adapter *adapter = iflib_get_softc(ctx); int error = E1000_SUCCESS; struct em_rx_queue *que; int i; MPASS(adapter->rx_num_queues > 0); MPASS(adapter->rx_num_queues == nrxqsets); /* First allocate the top level queue structs */ if (!(adapter->rx_queues = (struct em_rx_queue *) malloc(sizeof(struct em_rx_queue) * adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); error = ENOMEM; goto fail; } for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { /* Set up 
some basics */ struct rx_ring *rxr = &que->rxr; rxr->adapter = que->adapter = adapter; rxr->que = que; que->me = rxr->me = i; /* get the virtual and physical address of the hardware queues */ rxr->rx_base = (union e1000_rx_desc_extended *)vaddrs[i*nrxqs]; rxr->rx_paddr = paddrs[i*nrxqs]; } device_printf(iflib_get_dev(ctx), "allocated for %d rx_queues\n", adapter->rx_num_queues); return (0); fail: em_if_queues_free(ctx); return (error); } static void em_if_queues_free(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct em_tx_queue *tx_que = adapter->tx_queues; struct em_rx_queue *rx_que = adapter->rx_queues; if (tx_que != NULL) { for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; if (txr->tx_buffers == NULL) break; free(txr->tx_buffers, M_DEVBUF); txr->tx_buffers = NULL; } free(adapter->tx_queues, M_DEVBUF); adapter->tx_queues = NULL; } if (rx_que != NULL) { free(adapter->rx_queues, M_DEVBUF); adapter->rx_queues = NULL; } em_release_hw_control(adapter); if (adapter->mta != NULL) { free(adapter->mta, M_DEVBUF); } } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void em_initialize_transmit_unit(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct em_tx_queue *que; struct tx_ring *txr; struct e1000_hw *hw = &adapter->hw; u32 tctl, txdctl = 0, tarc, tipg = 0; INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); for (int i = 0; i < adapter->tx_num_queues; i++, txr++) { u64 bus_addr; caddr_t offp, endp; que = &adapter->tx_queues[i]; txr = &que->txr; bus_addr = txr->tx_paddr; /*Enable all queues */ em_init_tx_ring(que); /* Clear checksum offload context. 
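The bzero below wipes everything from csum_flags through the end of the tx_ring structure, so no stale offload state survives reinitialization.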
*/ offp = (caddr_t)&txr->csum_flags; endp = (caddr_t)(txr + 1); bzero(offp, endp - offp); /* Base and Len of TX Ring */ E1000_WRITE_REG(hw, E1000_TDLEN(i), scctx->isc_ntxd[0] * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(hw, E1000_TDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_TDBAL(i), (u32)bus_addr); /* Init the HEAD/TAIL indices */ E1000_WRITE_REG(hw, E1000_TDT(i), 0); E1000_WRITE_REG(hw, E1000_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)), E1000_READ_REG(&adapter->hw, E1000_TDLEN(i))); txdctl = 0; /* clear txdctl */ txdctl |= 0x1f; /* PTHRESH */ txdctl |= 1 << 8; /* HTHRESH */ txdctl |= 1 << 16;/* WTHRESH */ txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ txdctl |= E1000_TXDCTL_GRAN; txdctl |= 1 << 25; /* LWTHRESH */ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); } /* Set the default values for the Tx Inter Packet Gap timer */ switch (adapter->hw.mac.type) { case e1000_80003es2lan: tipg = DEFAULT_82543_TIPG_IPGR1; tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; case e1000_82542: tipg = DEFAULT_82542_TIPG_IPGT; tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; default: if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) tipg = DEFAULT_82543_TIPG_IPGT_FIBER; else tipg = DEFAULT_82543_TIPG_IPGT_COPPER; tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; } E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value); if(adapter->hw.mac.type >= e1000_82540) E1000_WRITE_REG(&adapter->hw, E1000_TADV, adapter->tx_abs_int_delay.value); if ((adapter->hw.mac.type == e1000_82571) || (adapter->hw.mac.type == e1000_82572)) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= TARC_SPEED_MODE_BIT; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } else if (adapter->hw.mac.type == e1000_80003es2lan) { /* errata: program both queues to unweighted RR */ tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } else if (adapter->hw.mac.type == e1000_82574) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= TARC_ERRATA_BIT; if ( adapter->tx_num_queues > 1) { tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX); E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } else E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } if (adapter->tx_int_delay.value > 0) adapter->txd_cmd |= E1000_TXD_CMD_IDE; /* Program the Transmit Control Register */ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); if (adapter->hw.mac.type >= e1000_82571) tctl |= E1000_TCTL_MULR; /* This write will effectively turn on the transmit unit. 
*/ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); if (hw->mac.type == e1000_pch_spt) { u32 reg; reg = E1000_READ_REG(hw, E1000_IOSFPC); reg |= E1000_RCTL_RDMTS_HEX; E1000_WRITE_REG(hw, E1000_IOSFPC, reg); reg = E1000_READ_REG(hw, E1000_TARC(0)); reg |= E1000_TARC0_CB_MULTIQ_3_REQ; E1000_WRITE_REG(hw, E1000_TARC(0), reg); } } /********************************************************************* * * Enable receive unit. * **********************************************************************/ static void em_initialize_receive_unit(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct ifnet *ifp = iflib_get_ifp(ctx); struct e1000_hw *hw = &adapter->hw; struct em_rx_queue *que; int i; u32 rctl, rxcsum, rfctl; INIT_DEBUGOUT("em_initialize_receive_units: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = E1000_READ_REG(hw, E1000_RCTL); /* Do not disable if ever enabled on this hardware */ if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); /* Setup the Receive Control Register */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Do not store bad packets */ rctl &= ~E1000_RCTL_SBP; /* Enable Long Packet receive */ if (if_getmtu(ifp) > ETHERMTU) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; /* Strip the CRC */ if (!em_disable_crc_stripping) rctl |= E1000_RCTL_SECRC; if (adapter->hw.mac.type >= e1000_82540) { E1000_WRITE_REG(&adapter->hw, E1000_RADV, adapter->rx_abs_int_delay.value); /* * Set the interrupt throttling rate. Value is calculated * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */ E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR); } E1000_WRITE_REG(&adapter->hw, E1000_RDTR, adapter->rx_int_delay.value); /* Use extended rx descriptor formats */ rfctl = E1000_READ_REG(hw, E1000_RFCTL); rfctl |= E1000_RFCTL_EXTEN; /* ** When using MSIX interrupts we need to throttle ** using the EITR register (82574 only) */ if (hw->mac.type == e1000_82574) { for (int i = 0; i < 4; i++) E1000_WRITE_REG(hw, E1000_EITR_82574(i), DEFAULT_ITR); /* Disable accelerated acknowledge */ rfctl |= E1000_RFCTL_ACK_DIS; } E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); if (if_getcapenable(ifp) & IFCAP_RXCSUM && adapter->hw.mac.type >= e1000_82543) { if (adapter->tx_num_queues > 1) { if (adapter->hw.mac.type >= igb_mac_min) { rxcsum |= E1000_RXCSUM_PCSD; if (hw->mac.type != e1000_82575) rxcsum |= E1000_RXCSUM_CRCOFL; } else rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL | E1000_RXCSUM_PCSD; } else { if (adapter->hw.mac.type >= igb_mac_min) rxcsum |= E1000_RXCSUM_IPPCSE; else rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL; if (adapter->hw.mac.type > e1000_82575) rxcsum |= E1000_RXCSUM_CRCOFL; } } else rxcsum &= ~E1000_RXCSUM_TUOFL; E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); if (adapter->rx_num_queues > 1) { if (adapter->hw.mac.type >= igb_mac_min) igb_initialize_rss_mapping(adapter); else em_initialize_rss_mapping(adapter); } /* ** XXX TEMPORARY WORKAROUND: on some systems with 82573 ** long latencies are observed, like Lenovo X60. This ** change eliminates the problem, but since having positive ** values in RDTR is a known source of problems on other ** platforms another solution is being sought. 
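** Hence the unconditional RDTR write of 0x20 just below, applied to
** the 82573 only.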
*/ if (hw->mac.type == e1000_82573) E1000_WRITE_REG(hw, E1000_RDTR, 0x20); for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; /* Setup the Base and Length of the Rx Descriptor Ring */ u64 bus_addr = rxr->rx_paddr; #if 0 u32 rdt = adapter->rx_num_queues -1; /* default */ #endif E1000_WRITE_REG(hw, E1000_RDLEN(i), scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr); /* Setup the Head and Tail Descriptor Pointers */ E1000_WRITE_REG(hw, E1000_RDH(i), 0); E1000_WRITE_REG(hw, E1000_RDT(i), 0); } /* * Set PTHRESH for improved jumbo performance * According to 10.2.5.11 of Intel 82574 Datasheet, * RXDCTL(1) is written whenever RXDCTL(0) is written. * Only write to RXDCTL(1) if there is a need for different * settings. */ if (((adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_pch2lan) || (adapter->hw.mac.type == e1000_ich10lan)) && (if_getmtu(ifp) > ETHERMTU)) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); } else if (adapter->hw.mac.type == e1000_82574) { for (int i = 0; i < adapter->rx_num_queues; i++) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); rxdctl |= 0x20; /* PTHRESH */ rxdctl |= 4 << 8; /* HTHRESH */ rxdctl |= 4 << 16;/* WTHRESH */ rxdctl |= 1 << 24; /* Switch to granularity */ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } } else if (adapter->hw.mac.type >= igb_mac_min) { u32 psize, srrctl = 0; if (ifp->if_mtu > ETHERMTU) { rctl |= E1000_RCTL_LPE; /* Set maximum packet len */ psize = scctx->isc_max_frame_size; if (psize <= 4096) { srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; } else if (psize > 4096) { srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; } /* are we on a vlan? */ if (ifp->if_vlantrunk != NULL) psize += VLAN_TAG_SIZE; E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); } else { rctl &= ~E1000_RCTL_LPE; srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_2048; } /* * If TX flow control is disabled and there's >1 queue defined, * enable DROP. * * This drops frames rather than hanging the RX MAC for all queues. */ if ((adapter->rx_num_queues > 1) && (adapter->fc == e1000_fc_none || adapter->fc == e1000_fc_rx_pause)) { srrctl |= E1000_SRRCTL_DROP_EN; } /* Setup the Base and Length of the Rx Descriptor Rings */ for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; u64 bus_addr = rxr->rx_paddr; u32 rxdctl; #ifdef notyet /* Configure for header split? 
-- ignore for now */ rxr->hdr_split = igb_header_split; #else srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; #endif E1000_WRITE_REG(hw, E1000_RDLEN(i), scctx->isc_nrxd[0] * sizeof(struct e1000_rx_desc)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (uint32_t)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); /* Enable this Queue */ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGB_RX_PTHRESH; rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } } if (adapter->hw.mac.type >= e1000_pch2lan) { if (if_getmtu(ifp) > ETHERMTU) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Make sure VLAN Filters are off */ rctl &= ~E1000_RCTL_VFE; if (adapter->rx_mbuf_sz == MCLBYTES) rctl |= E1000_RCTL_SZ_2048; else if (adapter->rx_mbuf_sz == MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; /* ensure we use a DTYPE of 00 here */ rctl &= ~0x00000C00; /* Write out the settings */ E1000_WRITE_REG(hw, E1000_RCTL, rctl); return; } static void em_if_vlan_register(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u32 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; } static void em_if_vlan_unregister(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u32 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; } static void em_setup_vlan_hw_support(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 reg; /* ** We get here thru init_locked, meaning ** a soft reset; this has already cleared ** the VFTA and other state, so if there ** have been no vlans registered do nothing. */ if (adapter->num_vlans == 0) return; /* ** A soft reset zeroes out the VFTA, so ** we need to repopulate it now. 
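** Each of the EM_VFTA_SIZE entries covers 32 VLAN IDs (index = vtag >> 5,
** bit = vtag & 0x1F), mirroring how em_if_vlan_register() filled in
** shadow_vfta above.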
*/ for (int i = 0; i < EM_VFTA_SIZE; i++) if (adapter->shadow_vfta[i] != 0) E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, adapter->shadow_vfta[i]); reg = E1000_READ_REG(hw, E1000_CTRL); reg |= E1000_CTRL_VME; E1000_WRITE_REG(hw, E1000_CTRL, reg); /* Enable the Filter Table */ reg = E1000_READ_REG(hw, E1000_RCTL); reg &= ~E1000_RCTL_CFIEN; reg |= E1000_RCTL_VFE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } static void em_if_enable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct e1000_hw *hw = &adapter->hw; u32 ims_mask = IMS_ENABLE_MASK; if (hw->mac.type == e1000_82574) { E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK); ims_mask |= adapter->ims; } if (adapter->intr_type == IFLIB_INTR_MSIX && hw->mac.type >= igb_mac_min) { u32 mask = (adapter->que_mask | adapter->link_mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask); ims_mask = E1000_IMS_LSC; } E1000_WRITE_REG(hw, E1000_IMS, ims_mask); } static void em_if_disable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct e1000_hw *hw = &adapter->hw; if (adapter->intr_type == IFLIB_INTR_MSIX) { if (hw->mac.type >= igb_mac_min) E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0); } E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); } /* * Bit of a misnomer, what this really means is * to enable OS management of the system... aka * to disable special hardware management features */ static void em_init_manageability(struct adapter *adapter) { /* A shared code workaround */ #define E1000_82542_MANC2H E1000_MANC2H if (adapter->has_manage) { int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* disable hardware interception of ARP */ manc &= ~(E1000_MANC_ARP_EN); /* enable receiving management packets to the host */ manc |= E1000_MANC_EN_MNG2HOST; #define E1000_MNG2HOST_PORT_623 (1 << 5) #define E1000_MNG2HOST_PORT_664 (1 << 6) manc2h |= E1000_MNG2HOST_PORT_623; manc2h |= E1000_MNG2HOST_PORT_664; E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * Give control back to hardware management * controller if there is one. */ static void em_release_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* re-enable hardware interception of ARP */ manc |= E1000_MANC_ARP_EN; manc &= ~E1000_MANC_EN_MNG2HOST; E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means * that the driver is loaded. For AMT version type f/w * this means that the network i/f is open. */ static void em_get_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); return; } /* * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is no longer loaded. For AMT versions of the * f/w this means that the network i/f is closed. 
*/ static void em_release_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (!adapter->has_manage) return; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); return; } static int em_is_valid_ether_addr(u8 *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (FALSE); } return (TRUE); } /* ** Parse the interface capabilities with regard ** to both system management and wake-on-lan for ** later use. */ static void em_get_wakeup(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); u16 eeprom_data = 0, device_id, apme_mask; adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); apme_mask = EM_EEPROM_APME; switch (adapter->hw.mac.type) { case e1000_82542: case e1000_82543: break; case e1000_82544: e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL2_REG, 1, &eeprom_data); apme_mask = EM_82544_APME; break; case e1000_82546: case e1000_82546_rev_3: if (adapter->hw.bus.func == 1) { e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; } else e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; case e1000_82573: case e1000_82583: adapter->has_amt = TRUE; /* Falls thru */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: if (adapter->hw.bus.func == 1) { e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; } else e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: + case e1000_82575: /* listing all igb devices */ + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + case e1000_vfadapt: + case e1000_vfadapt_i350: apme_mask = E1000_WUC_APME; adapter->has_amt = TRUE; eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC); break; default: e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; } if (eeprom_data & apme_mask) adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC); /* * We have the eeprom settings, now apply the special cases * where the eeprom may be wrong or the board won't support * wake on lan on a particular port */ device_id = pci_get_device(dev); switch (device_id) { case E1000_DEV_ID_82546GB_PCIE: adapter->wol = 0; break; case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546GB_FIBER: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->wol = 0; break; case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->wol = 0; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; case E1000_DEV_ID_82571EB_FIBER: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->wol = 0; break; case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: /* if 
quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->wol = 0; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; } return; } /* * Enable PCI Wake On LAN capability */ static void em_enable_wakeup(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); if_t ifp = iflib_get_ifp(ctx); u32 pmc, ctrl, ctrl_ext, rctl, wuc; u16 status; if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0)) return; /* Advertise the wakeup capability */ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3); E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); wuc = E1000_READ_REG(&adapter->hw, E1000_WUC); - wuc |= E1000_WUC_PME_EN ; + wuc |= (E1000_WUC_PME_EN | E1000_WUC_APME); E1000_WRITE_REG(&adapter->hw, E1000_WUC, wuc); if ((adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_pchlan) || (adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_ich10lan)) e1000_suspend_workarounds_ich8lan(&adapter->hw); /* Keep the laser running on Fiber adapters */ if (adapter->hw.phy.media_type == e1000_media_type_fiber || adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext); } /* ** Determine type of Wakeup: note that wol ** is set with all bits on by default. */ if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0) adapter->wol &= ~E1000_WUFC_MAG; + if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0) + adapter->wol &= ~E1000_WUFC_EX; + if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0) adapter->wol &= ~E1000_WUFC_MC; else { rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); } - if ((adapter->hw.mac.type == e1000_pchlan) || - (adapter->hw.mac.type == e1000_pch2lan) || - (adapter->hw.mac.type == e1000_pch_lpt) || - (adapter->hw.mac.type == e1000_pch_spt)) { + if ( adapter->hw.mac.type >= e1000_pchlan) { if (em_enable_phy_wakeup(adapter)) return; } else { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); } if (adapter->hw.phy.type == e1000_phy_igp_3) e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); /* Request PME */ status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if (if_getcapenable(ifp) & IFCAP_WOL) status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); return; } /* ** WOL in the newer chipset interfaces (pchlan) ** requires things to be copied into the PHY */ static int em_enable_phy_wakeup(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mreg, ret = 0; u16 preg; /* copy MAC RARs to PHY RARs */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); /* copy MAC MTA to PHY MTA */ for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) { mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF)); e1000_write_phy_reg(hw, BM_MTA(i) + 1, (u16)((mreg >> 16) & 0xFFFF)); } /* configure PHY Rx Control register */ e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg); mreg = E1000_READ_REG(hw, E1000_RCTL); if (mreg & E1000_RCTL_UPE) preg |= BM_RCTL_UPE; if (mreg & E1000_RCTL_MPE) preg |= BM_RCTL_MPE; preg &= ~(BM_RCTL_MO_MASK); if (mreg & E1000_RCTL_MO_3) preg |= (((mreg & 
E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) << BM_RCTL_MO_SHIFT); if (mreg & E1000_RCTL_BAM) preg |= BM_RCTL_BAM; if (mreg & E1000_RCTL_PMCF) preg |= BM_RCTL_PMCF; mreg = E1000_READ_REG(hw, E1000_CTRL); if (mreg & E1000_CTRL_RFCE) preg |= BM_RCTL_RFCE; e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg); /* enable PHY wakeup in MAC register */ E1000_WRITE_REG(hw, E1000_WUC, - E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); + E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN | E1000_WUC_APME); E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol); /* configure and enable PHY wakeup in PHY registers */ e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol); e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); /* activate PHY wakeup */ ret = hw->phy.ops.acquire(hw); if (ret) { printf("Could not acquire PHY\n"); return ret; } e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg); if (ret) { printf("Could not read PHY page 769\n"); goto out; } preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg); if (ret) printf("Could not set PHY Host Wakeup bit\n"); out: hw->phy.ops.release(hw); return ret; } static void em_if_led_func(if_ctx_t ctx, int onoff) { struct adapter *adapter = iflib_get_softc(ctx); if (onoff) { e1000_setup_led(&adapter->hw); e1000_led_on(&adapter->hw); } else { e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } } /* ** Disable the L0S and L1 LINK states */ static void em_disable_aspm(struct adapter *adapter) { int base, reg; u16 link_cap,link_ctrl; device_t dev = adapter->dev; switch (adapter->hw.mac.type) { case e1000_82573: case e1000_82574: case e1000_82583: break; default: return; } if (pci_find_cap(dev, PCIY_EXPRESS, &base) != 0) return; reg = base + PCIER_LINK_CAP; link_cap = pci_read_config(dev, reg, 2); if ((link_cap & PCIEM_LINK_CAP_ASPM) == 0) return; reg = base + PCIER_LINK_CTL; link_ctrl = pci_read_config(dev, reg, 2); link_ctrl &= ~PCIEM_LINK_CTL_ASPMC; pci_write_config(dev, reg, link_ctrl, 2); return; } /********************************************************************** * * Update the board statistics counters. 
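* Most of these hardware counters are clear-on-read, which is why each
* value is accumulated with += into the softc copy.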
* **********************************************************************/ static void em_update_stats_counters(struct adapter *adapter) { if(adapter->hw.phy.media_type == e1000_media_type_copper || (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS); adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); } adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. 
*/ /* Both registers clear on the read of the high dword */ adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32); adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32); adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); /* Interrupt Counts */ adapter->stats.iac += E1000_READ_REG(&adapter->hw, E1000_IAC); adapter->stats.icrxptc += E1000_READ_REG(&adapter->hw, E1000_ICRXPTC); adapter->stats.icrxatc += E1000_READ_REG(&adapter->hw, E1000_ICRXATC); adapter->stats.ictxptc += E1000_READ_REG(&adapter->hw, E1000_ICTXPTC); adapter->stats.ictxatc += E1000_READ_REG(&adapter->hw, E1000_ICTXATC); adapter->stats.ictxqec += E1000_READ_REG(&adapter->hw, E1000_ICTXQEC); adapter->stats.ictxqmtc += E1000_READ_REG(&adapter->hw, E1000_ICTXQMTC); adapter->stats.icrxdmtc += E1000_READ_REG(&adapter->hw, E1000_ICRXDMTC); adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC); if (adapter->hw.mac.type >= e1000_82543) { adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, E1000_RXERRC); adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, E1000_TNCRS); adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, E1000_CEXTERR); adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, E1000_TSCTC); adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, E1000_TSCTFC); } } static uint64_t em_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_COLLISIONS: return (adapter->stats.colc); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.mpc + adapter->stats.cexterr); case IFCOUNTER_OERRORS: return (adapter->stats.ecol + adapter->stats.latecol + adapter->watchdog_events); default: return (if_get_counter_default(ifp, cnt)); } } /* Export a single 32-bit register via a read-only sysctl. */ static int em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* * Add sysctl variables, one per statistic, to the system. 
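* They appear under the device's sysctl tree; e.g. (assuming unit 0)
* "sysctl dev.em.0.mac_stats.good_pkts_recvd" reads one of them.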
*/ static void em_add_hw_stats(struct adapter *adapter) { device_t dev = iflib_get_dev(adapter->ctx); struct em_tx_queue *tx_que = adapter->tx_queues; struct em_rx_queue *rx_que = adapter->rx_queues; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct e1000_hw_stats *stats = &adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSIX IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail", CTLFLAG_RD, &adapter->mbuf_defrag_failed, "Defragmenting mbuf chain failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", CTLFLAG_RD, &adapter->no_tx_dma_setup, "Driver tx dma failure in xmit"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL, em_sysctl_reg_handler, "IU", "Device Control Register"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL, em_sysctl_reg_handler, "IU", "Receiver Control Register"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "TX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", CTLFLAG_RD, &txr->tx_irq, "Queue MSI-X Transmit Interrupts"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txr->no_desc_avail, "Queue No Descriptor Available"); } for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "RX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", CTLFLAG_RD, &rxr->rx_irq, "Queue MSI-X Receive Interrupts"); } /* MAC stats get their own sub node */ 
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD, NULL, "Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &adapter->stats.symerrs, "Symbol Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &adapter->stats.sec, "Sequence Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &adapter->stats.dc, "Defer Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &adapter->stats.mpc, "Missed Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &adapter->stats.rnbc, "Receive No Buffers"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &adapter->stats.ruc, "Receive Undersize"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &adapter->stats.rfc, "Fragmented Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &adapter->stats.roc, "Oversized Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &adapter->stats.rjc, "Received Jabber"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", CTLFLAG_RD, &adapter->stats.rxerrc, "Receive Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &adapter->stats.crcerrs, "CRC errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &adapter->stats.algnerrc, "Alignment Errors"); /* On 82575 these are collision counts */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs", CTLFLAG_RD, &adapter->stats.cexterr, "Collision/Carrier extension errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &adapter->stats.xonrxc, "XON Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &adapter->stats.xontxc, "XON Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &adapter->stats.xoffrxc, "XOFF Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &adapter->stats.xofftxc, "XOFF Transmitted"); /* Packet Reception Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", CTLFLAG_RD, &adapter->stats.tpr, "Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &adapter->stats.gprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.bprc, "Broadcast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.mprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &adapter->stats.prc64, "64 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &adapter->stats.prc127, "65-127 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, 
&adapter->stats.prc255, "128-255 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &adapter->stats.prc511, "256-511 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &adapter->stats.prc1023, "512-1023 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.prc1522, "1024-1522 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &adapter->stats.gorc, "Good Octets Received"); /* Packet Transmission Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &adapter->stats.gotc, "Good Octets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &adapter->stats.tpt, "Total Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &adapter->stats.gptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &adapter->stats.bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &adapter->stats.mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &adapter->stats.ptc64, "64 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &adapter->stats.ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &adapter->stats.ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &adapter->stats.ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &adapter->stats.ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.ptc1522, "1024-1522 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &adapter->stats.tsctc, "TSO Contexts Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", CTLFLAG_RD, &adapter->stats.tsctfc, "TSO Contexts Failed"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &adapter->stats.iac, "Interrupt Assertion Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", CTLFLAG_RD, &adapter->stats.icrxptc, "Interrupt Cause Rx Pkt Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", CTLFLAG_RD, &adapter->stats.icrxatc, "Interrupt Cause Rx Abs Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", CTLFLAG_RD, &adapter->stats.ictxptc, "Interrupt Cause Tx Pkt Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", CTLFLAG_RD, &adapter->stats.ictxatc, "Interrupt Cause Tx Abs Timer Expire Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", CTLFLAG_RD, &adapter->stats.ictxqec, "Interrupt Cause Tx Queue Empty Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", CTLFLAG_RD, &adapter->stats.ictxqmtc, "Interrupt Cause Tx Queue Min Thresh Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &adapter->stats.icrxdmtc, "Interrupt Cause Rx 
Desc Min Thresh Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun", CTLFLAG_RD, &adapter->stats.icrxoc, "Interrupt Cause Receiver Overrun Count"); } /********************************************************************** * * This routine provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words; the data that matters lives within that extent. * **********************************************************************/ static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. */ if (result == 1) em_print_nvm_info(adapter); return (error); } static void em_print_nvm_info(struct adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* It's a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ",row); } e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { struct em_int_delay_info *info; struct adapter *adapter; u32 regval; int error, usecs, ticks; info = (struct em_int_delay_info *)arg1; usecs = info->value; error = sysctl_handle_int(oidp, &usecs, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535)) return (EINVAL); info->value = usecs; ticks = EM_USECS_TO_TICKS(usecs); if (info->offset == E1000_ITR) /* units are 256ns here */ ticks *= 4; adapter = info->adapter; regval = E1000_READ_OFFSET(&adapter->hw, info->offset); regval = (regval & ~0xffff) | (ticks & 0xffff); /* Handle a few special cases. */ switch (info->offset) { case E1000_RDTR: break; case E1000_TIDV: if (ticks == 0) { adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; /* Don't write 0 into the TIDV register. */ regval++; } else adapter->txd_cmd |= E1000_TXD_CMD_IDE; break; } E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); return (0); } static void em_add_int_delay_sysctl(struct adapter *adapter, const char *name, const char *description, struct em_int_delay_info *info, int offset, int value) { info->adapter = adapter; info->offset = offset; info->value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, em_sysctl_int_delay, "I", description); } static void em_set_sysctl_value(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { *limit = value; SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLFLAG_RW, limit, value, description); } /* ** Set flow control using sysctl: ** Flow control values: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ static int em_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct adapter *adapter = (struct adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (input == adapter->fc) /* no change? 
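 * (e.g. writing the value already in effect, a hypothetical
 * "sysctl dev.em.0.fc=3" while full flow control is already
 * requested, is treated as a no-op)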
*/ return (error); switch (input) { case e1000_fc_rx_pause: case e1000_fc_tx_pause: case e1000_fc_full: case e1000_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; e1000_force_mac_fc(&adapter->hw); return (error); } /* ** Manage Energy Efficient Ethernet: ** Control values: ** 0/1 - enabled/disabled */ static int em_sysctl_eee(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *) arg1; int error, value; value = adapter->hw.dev_spec.ich8lan.eee_disable; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0); em_if_init(adapter->ctx); return (0); } static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_debug_info(adapter); } return (error); } /* ** This routine is meant to be fluid, add whatever is ** needed for debugging a problem. -jfv */ static void em_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = &adapter->tx_queues->txr; struct rx_ring *rxr = &adapter->rx_queues->rxr; if (if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) printf("Interface is RUNNING "); else printf("Interface is NOT RUNNING\n"); if (if_getdrvflags(adapter->ifp) & IFF_DRV_OACTIVE) printf("and INACTIVE\n"); else printf("and ACTIVE\n"); for (int i = 0; i < adapter->tx_num_queues; i++, txr++) { device_printf(dev, "TX Queue %d ------\n", i); device_printf(dev, "hw tdh = %d, hw tdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_TDH(i)), E1000_READ_REG(&adapter->hw, E1000_TDT(i))); } for (int j=0; j < adapter->rx_num_queues; j++, rxr++) { device_printf(dev, "RX Queue %d ------\n", j); device_printf(dev, "hw rdh = %d, hw rdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_RDH(j)), E1000_READ_REG(&adapter->hw, E1000_RDT(j))); } } /* * 82574 only: * Write a new value to the EEPROM increasing the number of MSIX * vectors from 3 to 5, for proper multiqueue support. 
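 * (The NVM field appears to encode the vector count minus one,
 * which is why the code below writes 4 to report 5 vectors; this
 * reading is inferred from the code itself, not from a datasheet.)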
*/ static void em_enable_vectors_82574(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct e1000_hw *hw = &adapter->hw; device_t dev = iflib_get_dev(ctx); u16 edata; e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata); printf("Current cap: %#06x\n", edata); if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) { device_printf(dev, "Writing to eeprom: increasing " "reported MSIX vectors from 3 to 5...\n"); edata &= ~(EM_NVM_MSIX_N_MASK); edata |= 4 << EM_NVM_MSIX_N_SHIFT; e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata); e1000_update_nvm_checksum(hw); device_printf(dev, "Writing to eeprom: done\n"); } } #ifdef DDB DB_COMMAND(em_reset_dev, em_ddb_reset_dev) { devclass_t dc; int max_em; dc = devclass_find("em"); max_em = devclass_get_maxunit(dc); for (int index = 0; index < (max_em - 1); index++) { device_t dev; dev = devclass_get_device(dc, index); if (device_get_driver(dev) == &em_driver) { struct adapter *adapter = device_get_softc(dev); em_if_init(adapter->ctx); } } } DB_COMMAND(em_dump_queue, em_ddb_dump_queue) { devclass_t dc; int max_em; dc = devclass_find("em"); max_em = devclass_get_maxunit(dc); for (int index = 0; index < (max_em - 1); index++) { device_t dev; dev = devclass_get_device(dc, index); if (device_get_driver(dev) == &em_driver) em_print_debug_info(device_get_softc(dev)); } } #endif Index: projects/clang400-import/sys/dev/hyperv/netvsc/hn_nvs.c =================================================================== --- projects/clang400-import/sys/dev/hyperv/netvsc/hn_nvs.c (revision 312719) +++ projects/clang400-import/sys/dev/hyperv/netvsc/hn_nvs.c (revision 312720) @@ -1,721 +1,735 @@ /*- * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2010-2012 Citrix Inc. * Copyright (c) 2012 NetApp Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Network Virtualization Service. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet6.h" #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int hn_nvs_conn_chim(struct hn_softc *); static int hn_nvs_conn_rxbuf(struct hn_softc *); static void hn_nvs_disconn_chim(struct hn_softc *); static void hn_nvs_disconn_rxbuf(struct hn_softc *); static int hn_nvs_conf_ndis(struct hn_softc *, int); static int hn_nvs_init_ndis(struct hn_softc *); static int hn_nvs_doinit(struct hn_softc *, uint32_t); static int hn_nvs_init(struct hn_softc *); static const void *hn_nvs_xact_execute(struct hn_softc *, struct vmbus_xact *, void *, int, size_t *, uint32_t); static void hn_nvs_sent_none(struct hn_nvs_sendctx *, struct hn_softc *, struct vmbus_channel *, const void *, int); struct hn_nvs_sendctx hn_nvs_sendctx_none = HN_NVS_SENDCTX_INITIALIZER(hn_nvs_sent_none, NULL); static const uint32_t hn_nvs_version[] = { HN_NVS_VERSION_5, HN_NVS_VERSION_4, HN_NVS_VERSION_2, HN_NVS_VERSION_1 }; static const void * hn_nvs_xact_execute(struct hn_softc *sc, struct vmbus_xact *xact, void *req, int reqlen, size_t *resplen0, uint32_t type) { struct hn_nvs_sendctx sndc; size_t resplen, min_resplen = *resplen0; const struct hn_nvs_hdr *hdr; int error; KASSERT(min_resplen >= sizeof(*hdr), ("invalid minimum response len %zu", min_resplen)); /* * Execute the xact setup by the caller. */ hn_nvs_sendctx_init(&sndc, hn_nvs_sent_xact, xact); vmbus_xact_activate(xact); error = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_RC, req, reqlen, &sndc); if (error) { vmbus_xact_deactivate(xact); return (NULL); } hdr = vmbus_chan_xact_wait(sc->hn_prichan, xact, &resplen, HN_CAN_SLEEP(sc)); /* * Check this NVS response message. */ if (resplen < min_resplen) { if_printf(sc->hn_ifp, "invalid NVS resp len %zu\n", resplen); return (NULL); } if (hdr->nvs_type != type) { if_printf(sc->hn_ifp, "unexpected NVS resp 0x%08x, " "expect 0x%08x\n", hdr->nvs_type, type); return (NULL); } /* All pass! */ *resplen0 = resplen; return (hdr); } static __inline int hn_nvs_req_send(struct hn_softc *sc, void *req, int reqlen) { return (hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_NONE, req, reqlen, &hn_nvs_sendctx_none)); } static int hn_nvs_conn_rxbuf(struct hn_softc *sc) { struct vmbus_xact *xact = NULL; struct hn_nvs_rxbuf_conn *conn; const struct hn_nvs_rxbuf_connresp *resp; size_t resp_len; uint32_t status; int error, rxbuf_size; /* * Limit RXBUF size for old NVS. */ if (sc->hn_nvs_ver <= HN_NVS_VERSION_2) rxbuf_size = HN_RXBUF_SIZE_COMPAT; else rxbuf_size = HN_RXBUF_SIZE; /* * Connect the RXBUF GPADL to the primary channel. * * NOTE: * Only primary channel has RXBUF connected to it. Sub-channels * just share this RXBUF. */ error = vmbus_chan_gpadl_connect(sc->hn_prichan, sc->hn_rxbuf_dma.hv_paddr, rxbuf_size, &sc->hn_rxbuf_gpadl); if (error) { if_printf(sc->hn_ifp, "rxbuf gpadl conn failed: %d\n", error); goto cleanup; } /* * Connect RXBUF to NVS. 
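 * (Same request/response pattern as the other NVS exchanges below:
 * obtain an xact, fill in the request, execute it, and check
 * nvs_status in the response before committing any state.)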
*/ xact = vmbus_xact_get(sc->hn_xact, sizeof(*conn)); if (xact == NULL) { if_printf(sc->hn_ifp, "no xact for nvs rxbuf conn\n"); error = ENXIO; goto cleanup; } conn = vmbus_xact_req_data(xact); conn->nvs_type = HN_NVS_TYPE_RXBUF_CONN; conn->nvs_gpadl = sc->hn_rxbuf_gpadl; conn->nvs_sig = HN_NVS_RXBUF_SIG; resp_len = sizeof(*resp); resp = hn_nvs_xact_execute(sc, xact, conn, sizeof(*conn), &resp_len, HN_NVS_TYPE_RXBUF_CONNRESP); if (resp == NULL) { if_printf(sc->hn_ifp, "exec nvs rxbuf conn failed\n"); error = EIO; goto cleanup; } status = resp->nvs_status; vmbus_xact_put(xact); xact = NULL; if (status != HN_NVS_STATUS_OK) { if_printf(sc->hn_ifp, "nvs rxbuf conn failed: %x\n", status); error = EIO; goto cleanup; } sc->hn_flags |= HN_FLAG_RXBUF_CONNECTED; return (0); cleanup: if (xact != NULL) vmbus_xact_put(xact); hn_nvs_disconn_rxbuf(sc); return (error); } static int hn_nvs_conn_chim(struct hn_softc *sc) { struct vmbus_xact *xact = NULL; struct hn_nvs_chim_conn *chim; const struct hn_nvs_chim_connresp *resp; size_t resp_len; uint32_t status, sectsz; int error; /* * Connect chimney sending buffer GPADL to the primary channel. * * NOTE: * Only primary channel has chimney sending buffer connected to it. * Sub-channels just share this chimney sending buffer. */ error = vmbus_chan_gpadl_connect(sc->hn_prichan, sc->hn_chim_dma.hv_paddr, HN_CHIM_SIZE, &sc->hn_chim_gpadl); if (error) { if_printf(sc->hn_ifp, "chim gpadl conn failed: %d\n", error); goto cleanup; } /* * Connect chimney sending buffer to NVS */ xact = vmbus_xact_get(sc->hn_xact, sizeof(*chim)); if (xact == NULL) { if_printf(sc->hn_ifp, "no xact for nvs chim conn\n"); error = ENXIO; goto cleanup; } chim = vmbus_xact_req_data(xact); chim->nvs_type = HN_NVS_TYPE_CHIM_CONN; chim->nvs_gpadl = sc->hn_chim_gpadl; chim->nvs_sig = HN_NVS_CHIM_SIG; resp_len = sizeof(*resp); resp = hn_nvs_xact_execute(sc, xact, chim, sizeof(*chim), &resp_len, HN_NVS_TYPE_CHIM_CONNRESP); if (resp == NULL) { if_printf(sc->hn_ifp, "exec nvs chim conn failed\n"); error = EIO; goto cleanup; } status = resp->nvs_status; sectsz = resp->nvs_sectsz; vmbus_xact_put(xact); xact = NULL; if (status != HN_NVS_STATUS_OK) { if_printf(sc->hn_ifp, "nvs chim conn failed: %x\n", status); error = EIO; goto cleanup; } if (sectsz == 0) { /* * Can't use chimney sending buffer; done! */ if_printf(sc->hn_ifp, "zero chimney sending buffer " "section size\n"); sc->hn_chim_szmax = 0; sc->hn_chim_cnt = 0; sc->hn_flags |= HN_FLAG_CHIM_CONNECTED; return (0); } sc->hn_chim_szmax = sectsz; sc->hn_chim_cnt = HN_CHIM_SIZE / sc->hn_chim_szmax; if (HN_CHIM_SIZE % sc->hn_chim_szmax != 0) { if_printf(sc->hn_ifp, "chimney sending sections are " "not properly aligned\n"); } if (sc->hn_chim_cnt % LONG_BIT != 0) { if_printf(sc->hn_ifp, "discard %d chimney sending sections\n", sc->hn_chim_cnt % LONG_BIT); } sc->hn_chim_bmap_cnt = sc->hn_chim_cnt / LONG_BIT; sc->hn_chim_bmap = malloc(sc->hn_chim_bmap_cnt * sizeof(u_long), M_DEVBUF, M_WAITOK | M_ZERO); /* Done! */ sc->hn_flags |= HN_FLAG_CHIM_CONNECTED; if (bootverbose) { if_printf(sc->hn_ifp, "chimney sending buffer %d/%d\n", sc->hn_chim_szmax, sc->hn_chim_cnt); } return (0); cleanup: if (xact != NULL) vmbus_xact_put(xact); hn_nvs_disconn_chim(sc); return (error); } static void hn_nvs_disconn_rxbuf(struct hn_softc *sc) { int error; if (sc->hn_flags & HN_FLAG_RXBUF_CONNECTED) { struct hn_nvs_rxbuf_disconn disconn; /* * Disconnect RXBUF from NVS. 
*/ memset(&disconn, 0, sizeof(disconn)); disconn.nvs_type = HN_NVS_TYPE_RXBUF_DISCONN; disconn.nvs_sig = HN_NVS_RXBUF_SIG; /* NOTE: No response. */ error = hn_nvs_req_send(sc, &disconn, sizeof(disconn)); if (error) { if_printf(sc->hn_ifp, "send nvs rxbuf disconn failed: %d\n", error); /* * Fine for a revoked channel, since the hypervisor * does not drain TX bufring for a revoked channel. */ if (!vmbus_chan_is_revoked(sc->hn_prichan)) sc->hn_flags |= HN_FLAG_RXBUF_REF; } sc->hn_flags &= ~HN_FLAG_RXBUF_CONNECTED; /* * Wait for the hypervisor to receive this NVS request. * * NOTE: * The TX bufring will not be drained by the hypervisor, * if the primary channel is revoked. */ while (!vmbus_chan_tx_empty(sc->hn_prichan) && !vmbus_chan_is_revoked(sc->hn_prichan)) pause("waittx", 1); /* * Linger long enough for NVS to disconnect RXBUF. */ pause("lingtx", (200 * hz) / 1000); } if (sc->hn_rxbuf_gpadl != 0) { /* * Disconnect RXBUF from primary channel. */ error = vmbus_chan_gpadl_disconnect(sc->hn_prichan, sc->hn_rxbuf_gpadl); if (error) { if_printf(sc->hn_ifp, "rxbuf gpadl disconn failed: %d\n", error); sc->hn_flags |= HN_FLAG_RXBUF_REF; } sc->hn_rxbuf_gpadl = 0; } } static void hn_nvs_disconn_chim(struct hn_softc *sc) { int error; if (sc->hn_flags & HN_FLAG_CHIM_CONNECTED) { struct hn_nvs_chim_disconn disconn; /* * Disconnect chimney sending buffer from NVS. */ memset(&disconn, 0, sizeof(disconn)); disconn.nvs_type = HN_NVS_TYPE_CHIM_DISCONN; disconn.nvs_sig = HN_NVS_CHIM_SIG; /* NOTE: No response. */ error = hn_nvs_req_send(sc, &disconn, sizeof(disconn)); if (error) { if_printf(sc->hn_ifp, "send nvs chim disconn failed: %d\n", error); /* * Fine for a revoked channel, since the hypervisor * does not drain TX bufring for a revoked channel. */ if (!vmbus_chan_is_revoked(sc->hn_prichan)) sc->hn_flags |= HN_FLAG_CHIM_REF; } sc->hn_flags &= ~HN_FLAG_CHIM_CONNECTED; /* * Wait for the hypervisor to receive this NVS request. * * NOTE: * The TX bufring will not be drained by the hypervisor, * if the primary channel is revoked. */ while (!vmbus_chan_tx_empty(sc->hn_prichan) && !vmbus_chan_is_revoked(sc->hn_prichan)) pause("waittx", 1); /* * Linger long enough for NVS to disconnect chimney * sending buffer. */ pause("lingtx", (200 * hz) / 1000); } if (sc->hn_chim_gpadl != 0) { /* * Disconnect chimney sending buffer from primary channel. 
*/ error = vmbus_chan_gpadl_disconnect(sc->hn_prichan, sc->hn_chim_gpadl); if (error) { if_printf(sc->hn_ifp, "chim gpadl disconn failed: %d\n", error); sc->hn_flags |= HN_FLAG_CHIM_REF; } sc->hn_chim_gpadl = 0; } if (sc->hn_chim_bmap != NULL) { free(sc->hn_chim_bmap, M_DEVBUF); sc->hn_chim_bmap = NULL; sc->hn_chim_bmap_cnt = 0; } } static int hn_nvs_doinit(struct hn_softc *sc, uint32_t nvs_ver) { struct vmbus_xact *xact; struct hn_nvs_init *init; const struct hn_nvs_init_resp *resp; size_t resp_len; uint32_t status; xact = vmbus_xact_get(sc->hn_xact, sizeof(*init)); if (xact == NULL) { if_printf(sc->hn_ifp, "no xact for nvs init\n"); return (ENXIO); } init = vmbus_xact_req_data(xact); init->nvs_type = HN_NVS_TYPE_INIT; init->nvs_ver_min = nvs_ver; init->nvs_ver_max = nvs_ver; resp_len = sizeof(*resp); resp = hn_nvs_xact_execute(sc, xact, init, sizeof(*init), &resp_len, HN_NVS_TYPE_INIT_RESP); if (resp == NULL) { if_printf(sc->hn_ifp, "exec init failed\n"); vmbus_xact_put(xact); return (EIO); } status = resp->nvs_status; vmbus_xact_put(xact); if (status != HN_NVS_STATUS_OK) { if (bootverbose) { /* * Caller may try another NVS version, and will log * error if there are no more NVS versions to try, * so don't bark out loud here. */ if_printf(sc->hn_ifp, "nvs init failed for ver 0x%x\n", nvs_ver); } return (EINVAL); } return (0); } /* * Configure MTU and enable VLAN. */ static int hn_nvs_conf_ndis(struct hn_softc *sc, int mtu) { struct hn_nvs_ndis_conf conf; int error; memset(&conf, 0, sizeof(conf)); conf.nvs_type = HN_NVS_TYPE_NDIS_CONF; conf.nvs_mtu = mtu; conf.nvs_caps = HN_NVS_NDIS_CONF_VLAN; + if (sc->hn_nvs_ver >= HN_NVS_VERSION_5) + conf.nvs_caps |= HN_NVS_NDIS_CONF_SRIOV; /* NOTE: No response. */ error = hn_nvs_req_send(sc, &conf, sizeof(conf)); if (error) { if_printf(sc->hn_ifp, "send nvs ndis conf failed: %d\n", error); return (error); } if (bootverbose) if_printf(sc->hn_ifp, "nvs ndis conf done\n"); sc->hn_caps |= HN_CAP_MTU | HN_CAP_VLAN; return (0); } static int hn_nvs_init_ndis(struct hn_softc *sc) { struct hn_nvs_ndis_init ndis; int error; memset(&ndis, 0, sizeof(ndis)); ndis.nvs_type = HN_NVS_TYPE_NDIS_INIT; ndis.nvs_ndis_major = HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver); ndis.nvs_ndis_minor = HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver); /* NOTE: No response. */ error = hn_nvs_req_send(sc, &ndis, sizeof(ndis)); if (error) if_printf(sc->hn_ifp, "send nvs ndis init failed: %d\n", error); return (error); } static int hn_nvs_init(struct hn_softc *sc) { int i, error; if (device_is_attached(sc->hn_dev)) { /* * NVS version and NDIS version MUST NOT be changed. */ if (bootverbose) { if_printf(sc->hn_ifp, "reinit NVS version 0x%x, " "NDIS version %u.%u\n", sc->hn_nvs_ver, HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver), HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver)); } error = hn_nvs_doinit(sc, sc->hn_nvs_ver); if (error) { if_printf(sc->hn_ifp, "reinit NVS version 0x%x " "failed: %d\n", sc->hn_nvs_ver, error); return (error); } goto done; } /* * Find the supported NVS version and set NDIS version accordingly. */ for (i = 0; i < nitems(hn_nvs_version); ++i) { error = hn_nvs_doinit(sc, hn_nvs_version[i]); if (!error) { sc->hn_nvs_ver = hn_nvs_version[i]; /* Set NDIS version according to NVS version. 
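 * (NVS 4 and earlier imply NDIS 6.1; anything newer uses NDIS 6.30.)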
*/ sc->hn_ndis_ver = HN_NDIS_VERSION_6_30; if (sc->hn_nvs_ver <= HN_NVS_VERSION_4) sc->hn_ndis_ver = HN_NDIS_VERSION_6_1; if (bootverbose) { if_printf(sc->hn_ifp, "NVS version 0x%x, " "NDIS version %u.%u\n", sc->hn_nvs_ver, HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver), HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver)); } goto done; } } if_printf(sc->hn_ifp, "no NVS available\n"); return (ENXIO); done: if (sc->hn_nvs_ver >= HN_NVS_VERSION_5) sc->hn_caps |= HN_CAP_HASHVAL; return (0); } int hn_nvs_attach(struct hn_softc *sc, int mtu) { int error; /* * Initialize NVS. */ error = hn_nvs_init(sc); if (error) return (error); if (sc->hn_nvs_ver >= HN_NVS_VERSION_2) { /* * Configure NDIS before initializing it. */ error = hn_nvs_conf_ndis(sc, mtu); if (error) return (error); } /* * Initialize NDIS. */ error = hn_nvs_init_ndis(sc); if (error) return (error); /* * Connect RXBUF. */ error = hn_nvs_conn_rxbuf(sc); if (error) return (error); /* * Connect chimney sending buffer. */ error = hn_nvs_conn_chim(sc); if (error) { hn_nvs_disconn_rxbuf(sc); return (error); } return (0); } void hn_nvs_detach(struct hn_softc *sc) { /* NOTE: there are no requests to stop the NVS. */ hn_nvs_disconn_rxbuf(sc); hn_nvs_disconn_chim(sc); } void hn_nvs_sent_xact(struct hn_nvs_sendctx *sndc, struct hn_softc *sc __unused, struct vmbus_channel *chan __unused, const void *data, int dlen) { vmbus_xact_wakeup(sndc->hn_cbarg, data, dlen); } static void hn_nvs_sent_none(struct hn_nvs_sendctx *sndc __unused, struct hn_softc *sc __unused, struct vmbus_channel *chan __unused, const void *data __unused, int dlen __unused) { /* EMPTY */ } int hn_nvs_alloc_subchans(struct hn_softc *sc, int *nsubch0) { struct vmbus_xact *xact; struct hn_nvs_subch_req *req; const struct hn_nvs_subch_resp *resp; int error, nsubch_req; uint32_t nsubch; size_t resp_len; nsubch_req = *nsubch0; KASSERT(nsubch_req > 0, ("invalid # of sub-channels %d", nsubch_req)); xact = vmbus_xact_get(sc->hn_xact, sizeof(*req)); if (xact == NULL) { if_printf(sc->hn_ifp, "no xact for nvs subch alloc\n"); return (ENXIO); } req = vmbus_xact_req_data(xact); req->nvs_type = HN_NVS_TYPE_SUBCH_REQ; req->nvs_op = HN_NVS_SUBCH_OP_ALLOC; req->nvs_nsubch = nsubch_req; resp_len = sizeof(*resp); resp = hn_nvs_xact_execute(sc, xact, req, sizeof(*req), &resp_len, HN_NVS_TYPE_SUBCH_RESP); if (resp == NULL) { if_printf(sc->hn_ifp, "exec nvs subch alloc failed\n"); error = EIO; goto done; } if (resp->nvs_status != HN_NVS_STATUS_OK) { if_printf(sc->hn_ifp, "nvs subch alloc failed: %x\n", resp->nvs_status); error = EIO; goto done; } nsubch = resp->nvs_nsubch; if (nsubch > nsubch_req) { if_printf(sc->hn_ifp, "%u subchans are allocated, " "requested %d\n", nsubch, nsubch_req); nsubch = nsubch_req; } *nsubch0 = nsubch; error = 0; done: vmbus_xact_put(xact); return (error); } int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan, struct hn_nvs_sendctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt) { return hn_nvs_send_rndis_sglist(chan, HN_NVS_RNDIS_MTYPE_CTRL, sndc, gpa, gpa_cnt); +} + +void +hn_nvs_set_datapath(struct hn_softc *sc, uint32_t path) +{ + struct hn_nvs_datapath dp; + + memset(&dp, 0, sizeof(dp)); + dp.nvs_type = HN_NVS_TYPE_SET_DATAPATH; + dp.nvs_active_path = path; + + hn_nvs_req_send(sc, &dp, sizeof(dp)); } Index: projects/clang400-import/sys/dev/hyperv/netvsc/hn_nvs.h =================================================================== --- projects/clang400-import/sys/dev/hyperv/netvsc/hn_nvs.h (revision 312719) +++ projects/clang400-import/sys/dev/hyperv/netvsc/hn_nvs.h (revision 312720) @@ -1,106 
+1,107 @@ /*- * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2010-2012 Citrix Inc. * Copyright (c) 2012 NetApp Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _HN_NVS_H_ #define _HN_NVS_H_ struct hn_nvs_sendctx; struct vmbus_channel; struct hn_softc; typedef void (*hn_nvs_sent_t) (struct hn_nvs_sendctx *, struct hn_softc *, struct vmbus_channel *, const void *, int); struct hn_nvs_sendctx { hn_nvs_sent_t hn_cb; void *hn_cbarg; }; #define HN_NVS_SENDCTX_INITIALIZER(cb, cbarg) \ { \ .hn_cb = cb, \ .hn_cbarg = cbarg \ } static __inline void hn_nvs_sendctx_init(struct hn_nvs_sendctx *sndc, hn_nvs_sent_t cb, void *cbarg) { sndc->hn_cb = cb; sndc->hn_cbarg = cbarg; } static __inline int hn_nvs_send(struct vmbus_channel *chan, uint16_t flags, void *nvs_msg, int nvs_msglen, struct hn_nvs_sendctx *sndc) { return (vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND, flags, nvs_msg, nvs_msglen, (uint64_t)(uintptr_t)sndc)); } static __inline int hn_nvs_send_sglist(struct vmbus_channel *chan, struct vmbus_gpa sg[], int sglen, void *nvs_msg, int nvs_msglen, struct hn_nvs_sendctx *sndc) { return (vmbus_chan_send_sglist(chan, sg, sglen, nvs_msg, nvs_msglen, (uint64_t)(uintptr_t)sndc)); } static __inline int hn_nvs_send_rndis_sglist(struct vmbus_channel *chan, uint32_t rndis_mtype, struct hn_nvs_sendctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt) { struct hn_nvs_rndis rndis; rndis.nvs_type = HN_NVS_TYPE_RNDIS; rndis.nvs_rndis_mtype = rndis_mtype; rndis.nvs_chim_idx = HN_NVS_CHIM_IDX_INVALID; rndis.nvs_chim_sz = 0; return (hn_nvs_send_sglist(chan, gpa, gpa_cnt, &rndis, sizeof(rndis), sndc)); } int hn_nvs_attach(struct hn_softc *sc, int mtu); void hn_nvs_detach(struct hn_softc *sc); int hn_nvs_alloc_subchans(struct hn_softc *sc, int *nsubch); void hn_nvs_sent_xact(struct hn_nvs_sendctx *sndc, struct hn_softc *sc, struct vmbus_channel *chan, const void *data, int dlen); int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan, struct hn_nvs_sendctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt); +void hn_nvs_set_datapath(struct hn_softc *sc, uint32_t path); extern struct hn_nvs_sendctx hn_nvs_sendctx_none; #endif /* !_HN_NVS_H_ */ Index: projects/clang400-import/sys/dev/hyperv/netvsc/if_hn.c =================================================================== --- 
projects/clang400-import/sys/dev/hyperv/netvsc/if_hn.c (revision 312719) +++ projects/clang400-import/sys/dev/hyperv/netvsc/if_hn.c (revision 312720) @@ -1,5575 +1,5743 @@ /*- * Copyright (c) 2010-2012 Citrix Inc. * Copyright (c) 2009-2012,2016 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2004-2006 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_hn.h" #include "opt_inet6.h" #include "opt_inet.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include +#include #include #include #include #include #ifdef RSS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "vmbus_if.h" #define HN_IFSTART_SUPPORT #define HN_RING_CNT_DEF_MAX 8 /* YYY should get it from the underlying channel */ #define HN_TX_DESC_CNT 512 #define HN_RNDIS_PKT_LEN \ (sizeof(struct rndis_packet_msg) + \ HN_RNDIS_PKTINFO_SIZE(HN_NDIS_HASH_VALUE_SIZE) + \ HN_RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) + \ HN_RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) + \ HN_RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE)) #define HN_RNDIS_PKT_BOUNDARY PAGE_SIZE #define HN_RNDIS_PKT_ALIGN CACHE_LINE_SIZE #define HN_TX_DATA_BOUNDARY PAGE_SIZE #define HN_TX_DATA_MAXSIZE IP_MAXPACKET #define HN_TX_DATA_SEGSIZE PAGE_SIZE /* -1 for RNDIS packet message */ #define HN_TX_DATA_SEGCNT_MAX (HN_GPACNT_MAX - 1) #define HN_DIRECT_TX_SIZE_DEF 128 #define HN_EARLY_TXEOF_THRESH 8 #define HN_PKTBUF_LEN_DEF (16 * 1024) #define HN_LROENT_CNT_DEF 128 #define HN_LRO_LENLIM_MULTIRX_DEF (12 * ETHERMTU) #define HN_LRO_LENLIM_DEF (25 * ETHERMTU) /* YYY 2*MTU is a bit rough, but should be good enough. */ #define HN_LRO_LENLIM_MIN(ifp) (2 * (ifp)->if_mtu) #define HN_LRO_ACKCNT_DEF 1 #define HN_LOCK_INIT(sc) \ sx_init(&(sc)->hn_lock, device_get_nameunit((sc)->hn_dev)) #define HN_LOCK_DESTROY(sc) sx_destroy(&(sc)->hn_lock) #define HN_LOCK_ASSERT(sc) sx_assert(&(sc)->hn_lock, SA_XLOCKED) #define HN_LOCK(sc) \ do { \ while (sx_try_xlock(&(sc)->hn_lock) == 0) \ DELAY(1000); \ } while (0) #define HN_UNLOCK(sc) sx_xunlock(&(sc)->hn_lock) #define HN_CSUM_IP_MASK (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP) #define HN_CSUM_IP6_MASK (CSUM_IP6_TCP | CSUM_IP6_UDP) #define HN_CSUM_IP_HWASSIST(sc) \ ((sc)->hn_tx_ring[0].hn_csum_assist & HN_CSUM_IP_MASK) #define HN_CSUM_IP6_HWASSIST(sc) \ ((sc)->hn_tx_ring[0].hn_csum_assist & HN_CSUM_IP6_MASK) #define HN_PKTSIZE_MIN(align) \ roundup2(ETHER_MIN_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN + \ HN_RNDIS_PKT_LEN, (align)) #define HN_PKTSIZE(m, align) \ roundup2((m)->m_pkthdr.len + HN_RNDIS_PKT_LEN, (align)) #ifdef RSS #define HN_RING_IDX2CPU(sc, idx) rss_getcpu((idx) % rss_getnumbuckets()) #else #define HN_RING_IDX2CPU(sc, idx) (((sc)->hn_cpu + (idx)) % mp_ncpus) #endif struct hn_txdesc { #ifndef HN_USE_TXDESC_BUFRING SLIST_ENTRY(hn_txdesc) link; #endif STAILQ_ENTRY(hn_txdesc) agg_link; /* Aggregated txdescs, in sending order. */ STAILQ_HEAD(, hn_txdesc) agg_list; /* The oldest packet, if transmission aggregation happens. 
*/ struct mbuf *m; struct hn_tx_ring *txr; int refs; uint32_t flags; /* HN_TXD_FLAG_ */ struct hn_nvs_sendctx send_ctx; uint32_t chim_index; int chim_size; bus_dmamap_t data_dmap; bus_addr_t rndis_pkt_paddr; struct rndis_packet_msg *rndis_pkt; bus_dmamap_t rndis_pkt_dmap; }; #define HN_TXD_FLAG_ONLIST 0x0001 #define HN_TXD_FLAG_DMAMAP 0x0002 #define HN_TXD_FLAG_ONAGG 0x0004 struct hn_rxinfo { uint32_t vlan_info; uint32_t csum_info; uint32_t hash_info; uint32_t hash_value; }; +struct hn_update_vf { + struct hn_rx_ring *rxr; + struct ifnet *vf; +}; + #define HN_RXINFO_VLAN 0x0001 #define HN_RXINFO_CSUM 0x0002 #define HN_RXINFO_HASHINF 0x0004 #define HN_RXINFO_HASHVAL 0x0008 #define HN_RXINFO_ALL \ (HN_RXINFO_VLAN | \ HN_RXINFO_CSUM | \ HN_RXINFO_HASHINF | \ HN_RXINFO_HASHVAL) #define HN_NDIS_VLAN_INFO_INVALID 0xffffffff #define HN_NDIS_RXCSUM_INFO_INVALID 0 #define HN_NDIS_HASH_INFO_INVALID 0 static int hn_probe(device_t); static int hn_attach(device_t); static int hn_detach(device_t); static int hn_shutdown(device_t); static void hn_chan_callback(struct vmbus_channel *, void *); static void hn_init(void *); static int hn_ioctl(struct ifnet *, u_long, caddr_t); #ifdef HN_IFSTART_SUPPORT static void hn_start(struct ifnet *); #endif static int hn_transmit(struct ifnet *, struct mbuf *); static void hn_xmit_qflush(struct ifnet *); static int hn_ifmedia_upd(struct ifnet *); static void hn_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int hn_rndis_rxinfo(const void *, int, struct hn_rxinfo *); static void hn_rndis_rx_data(struct hn_rx_ring *, const void *, int); static void hn_rndis_rx_status(struct hn_softc *, const void *, int); static void hn_nvs_handle_notify(struct hn_softc *, const struct vmbus_chanpkt_hdr *); static void hn_nvs_handle_comp(struct hn_softc *, struct vmbus_channel *, const struct vmbus_chanpkt_hdr *); static void hn_nvs_handle_rxbuf(struct hn_rx_ring *, struct vmbus_channel *, const struct vmbus_chanpkt_hdr *); static void hn_nvs_ack_rxbuf(struct hn_rx_ring *, struct vmbus_channel *, uint64_t); #if __FreeBSD_version >= 1100099 static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS); #endif static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); static int hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS); #if __FreeBSD_version < 1100095 static int hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS); #else static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS); #endif static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS); static int hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS); static int hn_caps_sysctl(SYSCTL_HANDLER_ARGS); static int hn_hwassist_sysctl(SYSCTL_HANDLER_ARGS); static int hn_rxfilter_sysctl(SYSCTL_HANDLER_ARGS); #ifndef RSS static int hn_rss_key_sysctl(SYSCTL_HANDLER_ARGS); static int hn_rss_ind_sysctl(SYSCTL_HANDLER_ARGS); #endif static int hn_rss_hash_sysctl(SYSCTL_HANDLER_ARGS); static int hn_txagg_size_sysctl(SYSCTL_HANDLER_ARGS); static int hn_txagg_pkts_sysctl(SYSCTL_HANDLER_ARGS); static int hn_txagg_pktmax_sysctl(SYSCTL_HANDLER_ARGS); static int hn_txagg_align_sysctl(SYSCTL_HANDLER_ARGS); static int hn_polling_sysctl(SYSCTL_HANDLER_ARGS); +static int hn_vf_sysctl(SYSCTL_HANDLER_ARGS); -static void hn_stop(struct hn_softc *); +static void hn_stop(struct hn_softc *, bool); static void hn_init_locked(struct hn_softc *); static int hn_chan_attach(struct hn_softc *, struct vmbus_channel *); static void 
hn_chan_detach(struct hn_softc *, struct vmbus_channel *); static int hn_attach_subchans(struct hn_softc *); static void hn_detach_allchans(struct hn_softc *); static void hn_chan_rollup(struct hn_rx_ring *, struct hn_tx_ring *); static void hn_set_ring_inuse(struct hn_softc *, int); static int hn_synth_attach(struct hn_softc *, int); static void hn_synth_detach(struct hn_softc *); static int hn_synth_alloc_subchans(struct hn_softc *, int *); static bool hn_synth_attachable(const struct hn_softc *); static void hn_suspend(struct hn_softc *); static void hn_suspend_data(struct hn_softc *); static void hn_suspend_mgmt(struct hn_softc *); static void hn_resume(struct hn_softc *); static void hn_resume_data(struct hn_softc *); static void hn_resume_mgmt(struct hn_softc *); static void hn_suspend_mgmt_taskfunc(void *, int); static void hn_chan_drain(struct hn_softc *, struct vmbus_channel *); static void hn_polling(struct hn_softc *, u_int); static void hn_chan_polling(struct vmbus_channel *, u_int); static void hn_update_link_status(struct hn_softc *); static void hn_change_network(struct hn_softc *); static void hn_link_taskfunc(void *, int); static void hn_netchg_init_taskfunc(void *, int); static void hn_netchg_status_taskfunc(void *, int); static void hn_link_status(struct hn_softc *); static int hn_create_rx_data(struct hn_softc *, int); static void hn_destroy_rx_data(struct hn_softc *); static int hn_check_iplen(const struct mbuf *, int); static int hn_set_rxfilter(struct hn_softc *, uint32_t); static int hn_rxfilter_config(struct hn_softc *); #ifndef RSS static int hn_rss_reconfig(struct hn_softc *); #endif static void hn_rss_ind_fixup(struct hn_softc *); static int hn_rxpkt(struct hn_rx_ring *, const void *, int, const struct hn_rxinfo *); static int hn_tx_ring_create(struct hn_softc *, int); static void hn_tx_ring_destroy(struct hn_tx_ring *); static int hn_create_tx_data(struct hn_softc *, int); static void hn_fixup_tx_data(struct hn_softc *); static void hn_destroy_tx_data(struct hn_softc *); static void hn_txdesc_dmamap_destroy(struct hn_txdesc *); static void hn_txdesc_gc(struct hn_tx_ring *, struct hn_txdesc *); static int hn_encap(struct ifnet *, struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **); static int hn_txpkt(struct ifnet *, struct hn_tx_ring *, struct hn_txdesc *); static void hn_set_chim_size(struct hn_softc *, int); static void hn_set_tso_maxsize(struct hn_softc *, int, int); static bool hn_tx_ring_pending(struct hn_tx_ring *); static void hn_tx_ring_qflush(struct hn_tx_ring *); static void hn_resume_tx(struct hn_softc *, int); static void hn_set_txagg(struct hn_softc *); static void *hn_try_txagg(struct ifnet *, struct hn_tx_ring *, struct hn_txdesc *, int); static int hn_get_txswq_depth(const struct hn_tx_ring *); static void hn_txpkt_done(struct hn_nvs_sendctx *, struct hn_softc *, struct vmbus_channel *, const void *, int); static int hn_txpkt_sglist(struct hn_tx_ring *, struct hn_txdesc *); static int hn_txpkt_chim(struct hn_tx_ring *, struct hn_txdesc *); static int hn_xmit(struct hn_tx_ring *, int); static void hn_xmit_taskfunc(void *, int); static void hn_xmit_txeof(struct hn_tx_ring *); static void hn_xmit_txeof_taskfunc(void *, int); #ifdef HN_IFSTART_SUPPORT static int hn_start_locked(struct hn_tx_ring *, int); static void hn_start_taskfunc(void *, int); static void hn_start_txeof(struct hn_tx_ring *); static void hn_start_txeof_taskfunc(void *, int); #endif SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Hyper-V network 
interface"); /* Trust tcp segements verification on host side. */ static int hn_trust_hosttcp = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN, &hn_trust_hosttcp, 0, "Trust tcp segement verification on host side, " "when csum info is missing (global setting)"); /* Trust udp datagrams verification on host side. */ static int hn_trust_hostudp = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN, &hn_trust_hostudp, 0, "Trust udp datagram verification on host side, " "when csum info is missing (global setting)"); /* Trust ip packets verification on host side. */ static int hn_trust_hostip = 1; SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN, &hn_trust_hostip, 0, "Trust ip packet verification on host side, " "when csum info is missing (global setting)"); /* Limit TSO burst size */ static int hn_tso_maxlen = IP_MAXPACKET; SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN, &hn_tso_maxlen, 0, "TSO burst limit"); /* Limit chimney send size */ static int hn_tx_chimney_size = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN, &hn_tx_chimney_size, 0, "Chimney send packet size limit"); /* Limit the size of packet for direct transmission */ static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF; SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN, &hn_direct_tx_size, 0, "Size of the packet for direct transmission"); /* # of LRO entries per RX ring */ #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 static int hn_lro_entry_count = HN_LROENT_CNT_DEF; SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN, &hn_lro_entry_count, 0, "LRO entry count"); #endif #endif static int hn_tx_taskq_cnt = 1; SYSCTL_INT(_hw_hn, OID_AUTO, tx_taskq_cnt, CTLFLAG_RDTUN, &hn_tx_taskq_cnt, 0, "# of TX taskqueues"); #define HN_TX_TASKQ_M_INDEP 0 #define HN_TX_TASKQ_M_GLOBAL 1 #define HN_TX_TASKQ_M_EVTTQ 2 static int hn_tx_taskq_mode = HN_TX_TASKQ_M_INDEP; SYSCTL_INT(_hw_hn, OID_AUTO, tx_taskq_mode, CTLFLAG_RDTUN, &hn_tx_taskq_mode, 0, "TX taskqueue modes: " "0 - independent, 1 - share global tx taskqs, 2 - share event taskqs"); #ifndef HN_USE_TXDESC_BUFRING static int hn_use_txdesc_bufring = 0; #else static int hn_use_txdesc_bufring = 1; #endif SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD, &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors"); #ifdef HN_IFSTART_SUPPORT /* Use ifnet.if_start instead of ifnet.if_transmit */ static int hn_use_if_start = 0; SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN, &hn_use_if_start, 0, "Use if_start TX method"); #endif /* # of channels to use */ static int hn_chan_cnt = 0; SYSCTL_INT(_hw_hn, OID_AUTO, chan_cnt, CTLFLAG_RDTUN, &hn_chan_cnt, 0, "# of channels to use; each channel has one RX ring and one TX ring"); /* # of transmit rings to use */ static int hn_tx_ring_cnt = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tx_ring_cnt, CTLFLAG_RDTUN, &hn_tx_ring_cnt, 0, "# of TX rings to use"); /* Software TX ring deptch */ static int hn_tx_swq_depth = 0; SYSCTL_INT(_hw_hn, OID_AUTO, tx_swq_depth, CTLFLAG_RDTUN, &hn_tx_swq_depth, 0, "Depth of IFQ or BUFRING"); /* Enable sorted LRO, and the depth of the per-channel mbuf queue */ #if __FreeBSD_version >= 1100095 static u_int hn_lro_mbufq_depth = 0; SYSCTL_UINT(_hw_hn, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN, &hn_lro_mbufq_depth, 0, "Depth of LRO mbuf queue"); #endif /* Packet transmission aggregation size limit */ static int hn_tx_agg_size = -1; SYSCTL_INT(_hw_hn, OID_AUTO, tx_agg_size, CTLFLAG_RDTUN, &hn_tx_agg_size, 0, "Packet transmission 
aggregation size limit"); /* Packet transmission aggregation count limit */ static int hn_tx_agg_pkts = -1; SYSCTL_INT(_hw_hn, OID_AUTO, tx_agg_pkts, CTLFLAG_RDTUN, &hn_tx_agg_pkts, 0, "Packet transmission aggregation packet limit"); static u_int hn_cpu_index; /* next CPU for channel */ static struct taskqueue **hn_tx_taskque;/* shared TX taskqueues */ #ifndef RSS static const uint8_t hn_rss_key_default[NDIS_HASH_KEYSIZE_TOEPLITZ] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; #endif /* !RSS */ static device_method_t hn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hn_probe), DEVMETHOD(device_attach, hn_attach), DEVMETHOD(device_detach, hn_detach), DEVMETHOD(device_shutdown, hn_shutdown), DEVMETHOD_END }; static driver_t hn_driver = { "hn", hn_methods, sizeof(struct hn_softc) }; static devclass_t hn_devclass; DRIVER_MODULE(hn, vmbus, hn_driver, hn_devclass, 0, 0); MODULE_VERSION(hn, 1); MODULE_DEPEND(hn, vmbus, 1, 1, 1); #if __FreeBSD_version >= 1100099 static void hn_set_lro_lenlim(struct hn_softc *sc, int lenlim) { int i; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim; } #endif static int hn_txpkt_sglist(struct hn_tx_ring *txr, struct hn_txdesc *txd) { KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID && txd->chim_size == 0, ("invalid rndis sglist txd")); return (hn_nvs_send_rndis_sglist(txr->hn_chan, HN_NVS_RNDIS_MTYPE_DATA, &txd->send_ctx, txr->hn_gpa, txr->hn_gpa_cnt)); } static int hn_txpkt_chim(struct hn_tx_ring *txr, struct hn_txdesc *txd) { struct hn_nvs_rndis rndis; KASSERT(txd->chim_index != HN_NVS_CHIM_IDX_INVALID && txd->chim_size > 0, ("invalid rndis chim txd")); rndis.nvs_type = HN_NVS_TYPE_RNDIS; rndis.nvs_rndis_mtype = HN_NVS_RNDIS_MTYPE_DATA; rndis.nvs_chim_idx = txd->chim_index; rndis.nvs_chim_sz = txd->chim_size; return (hn_nvs_send(txr->hn_chan, VMBUS_CHANPKT_FLAG_RC, &rndis, sizeof(rndis), &txd->send_ctx)); } static __inline uint32_t hn_chim_alloc(struct hn_softc *sc) { int i, bmap_cnt = sc->hn_chim_bmap_cnt; u_long *bmap = sc->hn_chim_bmap; uint32_t ret = HN_NVS_CHIM_IDX_INVALID; for (i = 0; i < bmap_cnt; ++i) { int idx; idx = ffsl(~bmap[i]); if (idx == 0) continue; --idx; /* ffsl is 1-based */ KASSERT(i * LONG_BIT + idx < sc->hn_chim_cnt, ("invalid i %d and idx %d", i, idx)); if (atomic_testandset_long(&bmap[i], idx)) continue; ret = i * LONG_BIT + idx; break; } return (ret); } static __inline void hn_chim_free(struct hn_softc *sc, uint32_t chim_idx) { u_long mask; uint32_t idx; idx = chim_idx / LONG_BIT; KASSERT(idx < sc->hn_chim_bmap_cnt, ("invalid chimney index 0x%x", chim_idx)); mask = 1UL << (chim_idx % LONG_BIT); KASSERT(sc->hn_chim_bmap[idx] & mask, ("index bitmap 0x%lx, chimney index %u, " "bitmap idx %d, bitmask 0x%lx", sc->hn_chim_bmap[idx], chim_idx, idx, mask)); atomic_clear_long(&sc->hn_chim_bmap[idx], mask); } #if defined(INET6) || defined(INET) /* * NOTE: If this function failed, the m_head would be freed. 
*/ static __inline struct mbuf * hn_tso_fixup(struct mbuf *m_head) { struct ether_vlan_header *evl; struct tcphdr *th; int ehlen; KASSERT(M_WRITABLE(m_head), ("TSO mbuf not writable")); #define PULLUP_HDR(m, len) \ do { \ if (__predict_false((m)->m_len < (len))) { \ (m) = m_pullup((m), (len)); \ if ((m) == NULL) \ return (NULL); \ } \ } while (0) PULLUP_HDR(m_head, sizeof(*evl)); evl = mtod(m_head, struct ether_vlan_header *); if (evl->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; else ehlen = ETHER_HDR_LEN; #ifdef INET if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) { struct ip *ip; int iphlen; PULLUP_HDR(m_head, ehlen + sizeof(*ip)); ip = mtodo(m_head, ehlen); iphlen = ip->ip_hl << 2; PULLUP_HDR(m_head, ehlen + iphlen + sizeof(*th)); th = mtodo(m_head, ehlen + iphlen); ip->ip_len = 0; ip->ip_sum = 0; th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } #endif #if defined(INET6) && defined(INET) else #endif #ifdef INET6 { struct ip6_hdr *ip6; PULLUP_HDR(m_head, ehlen + sizeof(*ip6)); ip6 = mtodo(m_head, ehlen); if (ip6->ip6_nxt != IPPROTO_TCP) { m_freem(m_head); return (NULL); } PULLUP_HDR(m_head, ehlen + sizeof(*ip6) + sizeof(*th)); th = mtodo(m_head, ehlen + sizeof(*ip6)); ip6->ip6_plen = 0; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); } #endif return (m_head); #undef PULLUP_HDR } #endif /* INET6 || INET */ static int hn_set_rxfilter(struct hn_softc *sc, uint32_t filter) { int error = 0; HN_LOCK_ASSERT(sc); if (sc->hn_rx_filter != filter) { error = hn_rndis_set_rxfilter(sc, filter); if (!error) sc->hn_rx_filter = filter; } return (error); } static int hn_rxfilter_config(struct hn_softc *sc) { struct ifnet *ifp = sc->hn_ifp; uint32_t filter; HN_LOCK_ASSERT(sc); - if (ifp->if_flags & IFF_PROMISC) { + if ((ifp->if_flags & IFF_PROMISC) || + (sc->hn_flags & HN_FLAG_VF)) { filter = NDIS_PACKET_TYPE_PROMISCUOUS; } else { filter = NDIS_PACKET_TYPE_DIRECTED; if (ifp->if_flags & IFF_BROADCAST) filter |= NDIS_PACKET_TYPE_BROADCAST; /* TODO: support multicast list */ if ((ifp->if_flags & IFF_ALLMULTI) || !TAILQ_EMPTY(&ifp->if_multiaddrs)) filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; } return (hn_set_rxfilter(sc, filter)); } static void hn_set_txagg(struct hn_softc *sc) { uint32_t size, pkts; int i; /* * Setup aggregation size. */ if (sc->hn_agg_size < 0) size = UINT32_MAX; else size = sc->hn_agg_size; if (sc->hn_rndis_agg_size < size) size = sc->hn_rndis_agg_size; /* NOTE: We only aggregate packets using chimney sending buffers. */ if (size > (uint32_t)sc->hn_chim_szmax) size = sc->hn_chim_szmax; if (size <= 2 * HN_PKTSIZE_MIN(sc->hn_rndis_agg_align)) { /* Disable */ size = 0; pkts = 0; goto done; } /* NOTE: Type of the per TX ring setting is 'int'. */ if (size > INT_MAX) size = INT_MAX; /* * Setup aggregation packet count. */ if (sc->hn_agg_pkts < 0) pkts = UINT32_MAX; else pkts = sc->hn_agg_pkts; if (sc->hn_rndis_agg_pkts < pkts) pkts = sc->hn_rndis_agg_pkts; if (pkts <= 1) { /* Disable */ size = 0; pkts = 0; goto done; } /* NOTE: Type of the per TX ring setting is 'short'. */ if (pkts > SHRT_MAX) pkts = SHRT_MAX; done: /* NOTE: Type of the per TX ring setting is 'short'. 
*/ if (sc->hn_rndis_agg_align > SHRT_MAX) { /* Disable */ size = 0; pkts = 0; } if (bootverbose) { if_printf(sc->hn_ifp, "TX agg size %u, pkts %u, align %u\n", size, pkts, sc->hn_rndis_agg_align); } for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; mtx_lock(&txr->hn_tx_lock); txr->hn_agg_szmax = size; txr->hn_agg_pktmax = pkts; txr->hn_agg_align = sc->hn_rndis_agg_align; mtx_unlock(&txr->hn_tx_lock); } } static int hn_get_txswq_depth(const struct hn_tx_ring *txr) { KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet")); if (hn_tx_swq_depth < txr->hn_txdesc_cnt) return txr->hn_txdesc_cnt; return hn_tx_swq_depth; } #ifndef RSS static int hn_rss_reconfig(struct hn_softc *sc) { int error; HN_LOCK_ASSERT(sc); if ((sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) == 0) return (ENXIO); /* * Disable RSS first. * * NOTE: * Direct reconfiguration by setting the UNCHG flags does * _not_ work properly. */ if (bootverbose) if_printf(sc->hn_ifp, "disable RSS\n"); error = hn_rndis_conf_rss(sc, NDIS_RSS_FLAG_DISABLE); if (error) { if_printf(sc->hn_ifp, "RSS disable failed\n"); return (error); } /* * Reenable the RSS w/ the updated RSS key or indirect * table. */ if (bootverbose) if_printf(sc->hn_ifp, "reconfig RSS\n"); error = hn_rndis_conf_rss(sc, NDIS_RSS_FLAG_NONE); if (error) { if_printf(sc->hn_ifp, "RSS reconfig failed\n"); return (error); } return (0); } #endif /* !RSS */ static void hn_rss_ind_fixup(struct hn_softc *sc) { struct ndis_rssprm_toeplitz *rss = &sc->hn_rss; int i, nchan; nchan = sc->hn_rx_ring_inuse; KASSERT(nchan > 1, ("invalid # of channels %d", nchan)); /* * Check indirect table to make sure that all channels in it * can be used. */ for (i = 0; i < NDIS_HASH_INDCNT; ++i) { if (rss->rss_ind[i] >= nchan) { if_printf(sc->hn_ifp, "RSS indirect table %d fixup: %u -> %d\n", i, rss->rss_ind[i], nchan - 1); rss->rss_ind[i] = nchan - 1; } } } static int hn_ifmedia_upd(struct ifnet *ifp __unused) { return EOPNOTSUPP; } static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct hn_softc *sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if ((sc->hn_link_flags & HN_LINK_FLAG_LINKUP) == 0) { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_10G_T | IFM_FDX; } +static void +hn_update_vf_task(void *arg, int pending __unused) +{ + struct hn_update_vf *uv = arg; + + uv->rxr->hn_vf = uv->vf; +} + +static void +hn_update_vf(struct hn_softc *sc, struct ifnet *vf) +{ + struct hn_rx_ring *rxr; + struct hn_update_vf uv; + struct task task; + int i; + + HN_LOCK_ASSERT(sc); + + TASK_INIT(&task, 0, hn_update_vf_task, &uv); + + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + rxr = &sc->hn_rx_ring[i]; + + if (i < sc->hn_rx_ring_inuse) { + uv.rxr = rxr; + uv.vf = vf; + vmbus_chan_run_task(rxr->hn_chan, &task); + } else { + rxr->hn_vf = vf; + } + } +} + +static void +hn_set_vf(struct hn_softc *sc, struct ifnet *ifp, bool vf) +{ + struct ifnet *hn_ifp; + + HN_LOCK(sc); + + if (!(sc->hn_flags & HN_FLAG_SYNTH_ATTACHED)) + goto out; + + hn_ifp = sc->hn_ifp; + + if (ifp == hn_ifp) + goto out; + + if (ifp->if_alloctype != IFT_ETHER) + goto out; + + /* Ignore lagg/vlan interfaces */ + if (strcmp(ifp->if_dname, "lagg") == 0 || + strcmp(ifp->if_dname, "vlan") == 0) + goto out; + + if (bcmp(IF_LLADDR(ifp), IF_LLADDR(hn_ifp), ETHER_ADDR_LEN) != 0) + goto out; + + /* Now we're sure 'ifp' is a real VF device. 
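+	 * (Editor's note: the pairing heuristic is nothing more than a
+	 * matching MAC address on a plain Ethernet ifnet; lagg/vlan
+	 * pseudo-interfaces are skipped by name above.  The switch is
+	 * also published via devctl_notify() below, so a devd(8) rule
+	 * can react to it.  A minimal hypothetical sketch:
+	 *
+	 *	notify 10 {
+	 *		match "system" "HYPERV_NIC_VF";
+	 *		match "type" "VF_UP";
+	 *		action "logger $subsystem: VF datapath up";
+	 *	};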
*/ + if (vf) { + if (sc->hn_flags & HN_FLAG_VF) + goto out; + + sc->hn_flags |= HN_FLAG_VF; + hn_rxfilter_config(sc); + } else { + if (!(sc->hn_flags & HN_FLAG_VF)) + goto out; + + sc->hn_flags &= ~HN_FLAG_VF; + if (sc->hn_ifp->if_drv_flags & IFF_DRV_RUNNING) + hn_rxfilter_config(sc); + else + hn_set_rxfilter(sc, NDIS_PACKET_TYPE_NONE); + } + + hn_nvs_set_datapath(sc, + vf ? HN_NVS_DATAPATH_VF : HN_NVS_DATAPATH_SYNTHETIC); + + hn_update_vf(sc, vf ? ifp : NULL); + + if (vf) { + hn_suspend_mgmt(sc); + sc->hn_link_flags &= + ~(HN_LINK_FLAG_LINKUP | HN_LINK_FLAG_NETCHG); + if_link_state_change(sc->hn_ifp, LINK_STATE_DOWN); + } else { + hn_resume_mgmt(sc); + } + + devctl_notify("HYPERV_NIC_VF", if_name(hn_ifp), + vf ? "VF_UP" : "VF_DOWN", NULL); + + if (bootverbose) + if_printf(hn_ifp, "Data path is switched %s %s\n", + vf ? "to" : "from", if_name(ifp)); +out: + HN_UNLOCK(sc); +} + +static void +hn_ifnet_event(void *arg, struct ifnet *ifp, int event) +{ + if (event != IFNET_EVENT_UP && event != IFNET_EVENT_DOWN) + return; + + hn_set_vf(arg, ifp, event == IFNET_EVENT_UP); +} + +static void +hn_ifaddr_event(void *arg, struct ifnet *ifp) +{ + hn_set_vf(arg, ifp, ifp->if_flags & IFF_UP); +} + /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */ static const struct hyperv_guid g_net_vsc_device_type = { .hv_guid = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46, 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E} }; static int hn_probe(device_t dev) { if (VMBUS_PROBE_GUID(device_get_parent(dev), dev, &g_net_vsc_device_type) == 0) { device_set_desc(dev, "Hyper-V Network Interface"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int hn_attach(device_t dev) { struct hn_softc *sc = device_get_softc(dev); struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; uint8_t eaddr[ETHER_ADDR_LEN]; struct ifnet *ifp = NULL; int error, ring_cnt, tx_ring_cnt; sc->hn_dev = dev; sc->hn_prichan = vmbus_get_channel(dev); HN_LOCK_INIT(sc); /* * Initialize these tunables once. */ sc->hn_agg_size = hn_tx_agg_size; sc->hn_agg_pkts = hn_tx_agg_pkts; /* * Setup taskqueue for transmission. */ if (hn_tx_taskq_mode == HN_TX_TASKQ_M_INDEP) { int i; sc->hn_tx_taskqs = malloc(hn_tx_taskq_cnt * sizeof(struct taskqueue *), M_DEVBUF, M_WAITOK); for (i = 0; i < hn_tx_taskq_cnt; ++i) { sc->hn_tx_taskqs[i] = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &sc->hn_tx_taskqs[i]); taskqueue_start_threads(&sc->hn_tx_taskqs[i], 1, PI_NET, "%s tx%d", device_get_nameunit(dev), i); } } else if (hn_tx_taskq_mode == HN_TX_TASKQ_M_GLOBAL) { sc->hn_tx_taskqs = hn_tx_taskque; } /* * Setup taskqueue for management tasks, e.g. link status. */ sc->hn_mgmt_taskq0 = taskqueue_create("hn_mgmt", M_WAITOK, taskqueue_thread_enqueue, &sc->hn_mgmt_taskq0); taskqueue_start_threads(&sc->hn_mgmt_taskq0, 1, PI_NET, "%s mgmt", device_get_nameunit(dev)); TASK_INIT(&sc->hn_link_task, 0, hn_link_taskfunc, sc); TASK_INIT(&sc->hn_netchg_init, 0, hn_netchg_init_taskfunc, sc); TIMEOUT_TASK_INIT(sc->hn_mgmt_taskq0, &sc->hn_netchg_status, 0, hn_netchg_status_taskfunc, sc); /* * Allocate ifnet and setup its name earlier, so that if_printf * can be used by functions that will be called after * ether_ifattach(). */ ifp = sc->hn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* * Initialize ifmedia earlier so that it can be unconditionally * destroyed if an error happens later on.
*/ ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts); /* * Figure out the # of RX rings (ring_cnt) and the # of TX rings * to use (tx_ring_cnt). * * NOTE: * The # of RX rings to use is same as the # of channels to use. */ ring_cnt = hn_chan_cnt; if (ring_cnt <= 0) { /* Default */ ring_cnt = mp_ncpus; if (ring_cnt > HN_RING_CNT_DEF_MAX) ring_cnt = HN_RING_CNT_DEF_MAX; } else if (ring_cnt > mp_ncpus) { ring_cnt = mp_ncpus; } #ifdef RSS if (ring_cnt > rss_getnumbuckets()) ring_cnt = rss_getnumbuckets(); #endif tx_ring_cnt = hn_tx_ring_cnt; if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt) tx_ring_cnt = ring_cnt; #ifdef HN_IFSTART_SUPPORT if (hn_use_if_start) { /* ifnet.if_start only needs one TX ring. */ tx_ring_cnt = 1; } #endif /* * Set the leader CPU for channels. */ sc->hn_cpu = atomic_fetchadd_int(&hn_cpu_index, ring_cnt) % mp_ncpus; /* * Create enough TX/RX rings, even if only limited number of * channels can be allocated. */ error = hn_create_tx_data(sc, tx_ring_cnt); if (error) goto failed; error = hn_create_rx_data(sc, ring_cnt); if (error) goto failed; /* * Create transaction context for NVS and RNDIS transactions. */ sc->hn_xact = vmbus_xact_ctx_create(bus_get_dma_tag(dev), HN_XACT_REQ_SIZE, HN_XACT_RESP_SIZE, 0); if (sc->hn_xact == NULL) { error = ENXIO; goto failed; } /* * Install orphan handler for the revocation of this device's * primary channel. * * NOTE: * The processing order is critical here: * Install the orphan handler, _before_ testing whether this * device's primary channel has been revoked or not. */ vmbus_chan_set_orphan(sc->hn_prichan, sc->hn_xact); if (vmbus_chan_is_revoked(sc->hn_prichan)) { error = ENXIO; goto failed; } /* * Attach the synthetic parts, i.e. NVS and RNDIS. */ error = hn_synth_attach(sc, ETHERMTU); if (error) goto failed; error = hn_rndis_get_eaddr(sc, eaddr); if (error) goto failed; #if __FreeBSD_version >= 1100099 if (sc->hn_rx_ring_inuse > 1) { /* * Reduce TCP segment aggregation limit for multiple * RX rings to increase ACK timeliness. */ hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MULTIRX_DEF); } #endif /* * Fixup TX stuffs after synthetic parts are attached. */ hn_fixup_tx_data(sc); ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "nvs_version", CTLFLAG_RD, &sc->hn_nvs_ver, 0, "NVS version"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "ndis_version", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, hn_ndis_version_sysctl, "A", "NDIS version"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "caps", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, hn_caps_sysctl, "A", "capabilities"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "hwassist", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, hn_hwassist_sysctl, "A", "hwassist"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rxfilter", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, hn_rxfilter_sysctl, "A", "rxfilter"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rss_hash", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, hn_rss_hash_sysctl, "A", "RSS hash"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rss_ind_size", CTLFLAG_RD, &sc->hn_rss_ind_size, 0, "RSS indirect entry count"); #ifndef RSS /* * Don't allow RSS key/indirect table changes, if RSS is defined. 
*/ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rss_key", CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_rss_key_sysctl, "IU", "RSS key"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rss_ind", CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_rss_ind_sysctl, "IU", "RSS indirect table"); #endif SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rndis_agg_size", CTLFLAG_RD, &sc->hn_rndis_agg_size, 0, "RNDIS offered packet transmission aggregation size limit"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rndis_agg_pkts", CTLFLAG_RD, &sc->hn_rndis_agg_pkts, 0, "RNDIS offered packet transmission aggregation count limit"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rndis_agg_align", CTLFLAG_RD, &sc->hn_rndis_agg_align, 0, "RNDIS packet transmission aggregation alignment"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_size", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_txagg_size_sysctl, "I", "Packet transmission aggregation size, 0 -- disable, -1 -- auto"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_pkts", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_txagg_pkts_sysctl, "I", "Packet transmission aggregation packets, " "0 -- disable, -1 -- auto"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "polling", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_polling_sysctl, "I", "Polling frequency: [100,1000000], 0 disable polling"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "vf", + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, + hn_vf_sysctl, "A", "Virtual Function's name"); /* * Setup the ifmedia, which has been initialized earlier. */ ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO); /* XXX ifmedia_set really should do this for us */ sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media; /* * Setup the ifnet for this interface. */ ifp->if_baudrate = IF_Gbps(10); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = hn_ioctl; ifp->if_init = hn_init; #ifdef HN_IFSTART_SUPPORT if (hn_use_if_start) { int qdepth = hn_get_txswq_depth(&sc->hn_tx_ring[0]); ifp->if_start = hn_start; IFQ_SET_MAXLEN(&ifp->if_snd, qdepth); ifp->if_snd.ifq_drv_maxlen = qdepth - 1; IFQ_SET_READY(&ifp->if_snd); } else #endif { ifp->if_transmit = hn_transmit; ifp->if_qflush = hn_xmit_qflush; } ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_LRO; #ifdef foo /* We can't diff IPv6 packets from IPv4 packets on RX path. */ ifp->if_capabilities |= IFCAP_RXCSUM_IPV6; #endif if (sc->hn_caps & HN_CAP_VLAN) { /* XXX not sure about VLAN_MTU. */ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; } ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist; if (ifp->if_hwassist & HN_CSUM_IP_MASK) ifp->if_capabilities |= IFCAP_TXCSUM; if (ifp->if_hwassist & HN_CSUM_IP6_MASK) ifp->if_capabilities |= IFCAP_TXCSUM_IPV6; if (sc->hn_caps & HN_CAP_TSO4) { ifp->if_capabilities |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_IP_TSO; } if (sc->hn_caps & HN_CAP_TSO6) { ifp->if_capabilities |= IFCAP_TSO6; ifp->if_hwassist |= CSUM_IP6_TSO; } /* Enable all available capabilities by default. */ ifp->if_capenable = ifp->if_capabilities; /* * Disable IPv6 TSO and TXCSUM by default, they still can * be enabled through SIOCSIFCAP. 
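 * For example (editor's note, hypothetical invocation), both can be
 * re-enabled from userland with:
 *
 *	# ifconfig hn0 txcsum6 tso6
 *
 * which lands in the SIOCSIFCAP case of hn_ioctl() below.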
*/ ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); ifp->if_hwassist &= ~(HN_CSUM_IP6_MASK | CSUM_IP6_TSO); if (ifp->if_capabilities & (IFCAP_TSO6 | IFCAP_TSO4)) { hn_set_tso_maxsize(sc, hn_tso_maxlen, ETHERMTU); ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; } ether_ifattach(ifp, eaddr); if ((ifp->if_capabilities & (IFCAP_TSO6 | IFCAP_TSO4)) && bootverbose) { if_printf(ifp, "TSO segcnt %u segsz %u\n", ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize); } /* Inform the upper layer about the long frame support. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* * Kick off link status check. */ sc->hn_mgmt_taskq = sc->hn_mgmt_taskq0; hn_update_link_status(sc); + sc->hn_ifnet_evthand = EVENTHANDLER_REGISTER(ifnet_event, + hn_ifnet_event, sc, EVENTHANDLER_PRI_ANY); + + sc->hn_ifaddr_evthand = EVENTHANDLER_REGISTER(ifaddr_event, + hn_ifaddr_event, sc, EVENTHANDLER_PRI_ANY); + return (0); failed: if (sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) hn_synth_detach(sc); hn_detach(dev); return (error); } static int hn_detach(device_t dev) { struct hn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->hn_ifp; + if (sc->hn_ifaddr_evthand != NULL) + EVENTHANDLER_DEREGISTER(ifaddr_event, sc->hn_ifaddr_evthand); + if (sc->hn_ifnet_evthand != NULL) + EVENTHANDLER_DEREGISTER(ifnet_event, sc->hn_ifnet_evthand); + if (sc->hn_xact != NULL && vmbus_chan_is_revoked(sc->hn_prichan)) { /* * In case the vmbus missed the orphan handler * installation. */ vmbus_xact_ctx_orphan(sc->hn_xact); } if (device_is_attached(dev)) { HN_LOCK(sc); if (sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) - hn_stop(sc); + hn_stop(sc, true); /* * NOTE: * hn_stop() only suspends data, so management * tasks have to be suspended manually here. */ hn_suspend_mgmt(sc); hn_synth_detach(sc); } HN_UNLOCK(sc); ether_ifdetach(ifp); } ifmedia_removeall(&sc->hn_media); hn_destroy_rx_data(sc); hn_destroy_tx_data(sc); if (sc->hn_tx_taskqs != NULL && sc->hn_tx_taskqs != hn_tx_taskque) { int i; for (i = 0; i < hn_tx_taskq_cnt; ++i) taskqueue_free(sc->hn_tx_taskqs[i]); free(sc->hn_tx_taskqs, M_DEVBUF); } taskqueue_free(sc->hn_mgmt_taskq0); if (sc->hn_xact != NULL) { /* * Uninstall the orphan handler _before_ the xact is * destructed. */ vmbus_chan_unset_orphan(sc->hn_prichan); vmbus_xact_ctx_destroy(sc->hn_xact); } if_free(ifp); HN_LOCK_DESTROY(sc); return (0); } static int hn_shutdown(device_t dev) { return (0); } static void hn_link_status(struct hn_softc *sc) { uint32_t link_status; int error; error = hn_rndis_get_linkstatus(sc, &link_status); if (error) { /* XXX what to do? */ return; } if (link_status == NDIS_MEDIA_STATE_CONNECTED) sc->hn_link_flags |= HN_LINK_FLAG_LINKUP; else sc->hn_link_flags &= ~HN_LINK_FLAG_LINKUP; if_link_state_change(sc->hn_ifp, (sc->hn_link_flags & HN_LINK_FLAG_LINKUP) ? LINK_STATE_UP : LINK_STATE_DOWN); } static void hn_link_taskfunc(void *xsc, int pending __unused) { struct hn_softc *sc = xsc; if (sc->hn_link_flags & HN_LINK_FLAG_NETCHG) return; hn_link_status(sc); } static void hn_netchg_init_taskfunc(void *xsc, int pending __unused) { struct hn_softc *sc = xsc; /* Prevent any link status checks from running. */ sc->hn_link_flags |= HN_LINK_FLAG_NETCHG; /* * Fake up a [link down --> link up] state change; a 5 second * delay is used, which closely simulates the miibus reaction * upon a link down event.
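 * (Editor's note: concretely, the interface is reported down right
 * away, and hn_netchg_status_taskfunc() re-queries the real link
 * state once the 5 * hz timeout below fires, lifting the NETCHG
 * block on regular link status checks.)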
*/ sc->hn_link_flags &= ~HN_LINK_FLAG_LINKUP; if_link_state_change(sc->hn_ifp, LINK_STATE_DOWN); taskqueue_enqueue_timeout(sc->hn_mgmt_taskq0, &sc->hn_netchg_status, 5 * hz); } static void hn_netchg_status_taskfunc(void *xsc, int pending __unused) { struct hn_softc *sc = xsc; /* Re-allow link status checks. */ sc->hn_link_flags &= ~HN_LINK_FLAG_NETCHG; hn_link_status(sc); } static void hn_update_link_status(struct hn_softc *sc) { if (sc->hn_mgmt_taskq != NULL) taskqueue_enqueue(sc->hn_mgmt_taskq, &sc->hn_link_task); } static void hn_change_network(struct hn_softc *sc) { if (sc->hn_mgmt_taskq != NULL) taskqueue_enqueue(sc->hn_mgmt_taskq, &sc->hn_netchg_init); } static __inline int hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs) { struct mbuf *m = *m_head; int error; KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID, ("txd uses chim")); error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { struct mbuf *m_new; m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX); if (m_new == NULL) return ENOBUFS; else *m_head = m = m_new; txr->hn_tx_collapsed++; error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); } if (!error) { bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_PREWRITE); txd->flags |= HN_TXD_FLAG_DMAMAP; } return error; } static __inline int hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) { KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0, ("put an onlist txd %#x", txd->flags)); KASSERT((txd->flags & HN_TXD_FLAG_ONAGG) == 0, ("put an onagg txd %#x", txd->flags)); KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs)); if (atomic_fetchadd_int(&txd->refs, -1) != 1) return 0; if (!STAILQ_EMPTY(&txd->agg_list)) { struct hn_txdesc *tmp_txd; while ((tmp_txd = STAILQ_FIRST(&txd->agg_list)) != NULL) { int freed; KASSERT(STAILQ_EMPTY(&tmp_txd->agg_list), ("recursive aggregation on aggregated txdesc")); KASSERT((tmp_txd->flags & HN_TXD_FLAG_ONAGG), ("not aggregated txdesc")); KASSERT((tmp_txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("aggregated txdesc uses dmamap")); KASSERT(tmp_txd->chim_index == HN_NVS_CHIM_IDX_INVALID, ("aggregated txdesc consumes " "chimney sending buffer")); KASSERT(tmp_txd->chim_size == 0, ("aggregated txdesc has non-zero " "chimney sending size")); STAILQ_REMOVE_HEAD(&txd->agg_list, agg_link); tmp_txd->flags &= ~HN_TXD_FLAG_ONAGG; freed = hn_txdesc_put(txr, tmp_txd); KASSERT(freed, ("failed to free aggregated txdesc")); } } if (txd->chim_index != HN_NVS_CHIM_IDX_INVALID) { KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("chim txd uses dmamap")); hn_chim_free(txr->hn_sc, txd->chim_index); txd->chim_index = HN_NVS_CHIM_IDX_INVALID; txd->chim_size = 0; } else if (txd->flags & HN_TXD_FLAG_DMAMAP) { bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->hn_tx_data_dtag, txd->data_dmap); txd->flags &= ~HN_TXD_FLAG_DMAMAP; } if (txd->m != NULL) { m_freem(txd->m); txd->m = NULL; } txd->flags |= HN_TXD_FLAG_ONLIST; #ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); KASSERT(txr->hn_txdesc_avail >= 0 && txr->hn_txdesc_avail < txr->hn_txdesc_cnt, ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail)); txr->hn_txdesc_avail++; SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); mtx_unlock_spin(&txr->hn_txlist_spin); #else /* HN_USE_TXDESC_BUFRING */ #ifdef HN_DEBUG atomic_add_int(&txr->hn_txdesc_avail,
1); #endif buf_ring_enqueue(txr->hn_txdesc_br, txd); #endif /* !HN_USE_TXDESC_BUFRING */ return 1; } static __inline struct hn_txdesc * hn_txdesc_get(struct hn_tx_ring *txr) { struct hn_txdesc *txd; #ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); txd = SLIST_FIRST(&txr->hn_txlist); if (txd != NULL) { KASSERT(txr->hn_txdesc_avail > 0, ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail)); txr->hn_txdesc_avail--; SLIST_REMOVE_HEAD(&txr->hn_txlist, link); } mtx_unlock_spin(&txr->hn_txlist_spin); #else txd = buf_ring_dequeue_sc(txr->hn_txdesc_br); #endif if (txd != NULL) { #ifdef HN_USE_TXDESC_BUFRING #ifdef HN_DEBUG atomic_subtract_int(&txr->hn_txdesc_avail, 1); #endif #endif /* HN_USE_TXDESC_BUFRING */ KASSERT(txd->m == NULL && txd->refs == 0 && STAILQ_EMPTY(&txd->agg_list) && txd->chim_index == HN_NVS_CHIM_IDX_INVALID && txd->chim_size == 0 && (txd->flags & HN_TXD_FLAG_ONLIST) && (txd->flags & HN_TXD_FLAG_ONAGG) == 0 && (txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("invalid txd")); txd->flags &= ~HN_TXD_FLAG_ONLIST; txd->refs = 1; } return txd; } static __inline void hn_txdesc_hold(struct hn_txdesc *txd) { /* 0->1 transition will never work */ KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs)); atomic_add_int(&txd->refs, 1); } static __inline void hn_txdesc_agg(struct hn_txdesc *agg_txd, struct hn_txdesc *txd) { KASSERT((agg_txd->flags & HN_TXD_FLAG_ONAGG) == 0, ("recursive aggregation on aggregating txdesc")); KASSERT((txd->flags & HN_TXD_FLAG_ONAGG) == 0, ("already aggregated")); KASSERT(STAILQ_EMPTY(&txd->agg_list), ("recursive aggregation on to-be-aggregated txdesc")); txd->flags |= HN_TXD_FLAG_ONAGG; STAILQ_INSERT_TAIL(&agg_txd->agg_list, txd, agg_link); } static bool hn_tx_ring_pending(struct hn_tx_ring *txr) { bool pending = false; #ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); if (txr->hn_txdesc_avail != txr->hn_txdesc_cnt) pending = true; mtx_unlock_spin(&txr->hn_txlist_spin); #else if (!buf_ring_full(txr->hn_txdesc_br)) pending = true; #endif return (pending); } static __inline void hn_txeof(struct hn_tx_ring *txr) { txr->hn_has_txeof = 0; txr->hn_txeof(txr); } static void hn_txpkt_done(struct hn_nvs_sendctx *sndc, struct hn_softc *sc, struct vmbus_channel *chan, const void *data __unused, int dlen __unused) { struct hn_txdesc *txd = sndc->hn_cbarg; struct hn_tx_ring *txr; txr = txd->txr; KASSERT(txr->hn_chan == chan, ("channel mismatch, on chan%u, should be chan%u", vmbus_chan_id(chan), vmbus_chan_id(txr->hn_chan))); txr->hn_has_txeof = 1; hn_txdesc_put(txr, txd); ++txr->hn_txdone_cnt; if (txr->hn_txdone_cnt >= HN_EARLY_TXEOF_THRESH) { txr->hn_txdone_cnt = 0; if (txr->hn_oactive) hn_txeof(txr); } } static void hn_chan_rollup(struct hn_rx_ring *rxr, struct hn_tx_ring *txr) { #if defined(INET) || defined(INET6) tcp_lro_flush_all(&rxr->hn_lro); #endif /* * NOTE: * 'txr' could be NULL, if multiple channels and * ifnet.if_start method are enabled. 
*/ if (txr == NULL || !txr->hn_has_txeof) return; txr->hn_txdone_cnt = 0; hn_txeof(txr); } static __inline uint32_t hn_rndis_pktmsg_offset(uint32_t ofs) { KASSERT(ofs >= sizeof(struct rndis_packet_msg), ("invalid RNDIS packet msg offset %u", ofs)); return (ofs - __offsetof(struct rndis_packet_msg, rm_dataoffset)); } static __inline void * hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize, size_t pi_dlen, uint32_t pi_type) { const size_t pi_size = HN_RNDIS_PKTINFO_SIZE(pi_dlen); struct rndis_pktinfo *pi; KASSERT((pi_size & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK) == 0, ("unaligned pktinfo size %zu, pktinfo dlen %zu", pi_size, pi_dlen)); /* * Per-packet-info does not move; it only grows. * * NOTE: * rm_pktinfooffset in this phase counts from the beginning * of rndis_packet_msg. */ KASSERT(pkt->rm_pktinfooffset + pkt->rm_pktinfolen + pi_size <= pktsize, ("%u pktinfo overflows RNDIS packet msg", pi_type)); pi = (struct rndis_pktinfo *)((uint8_t *)pkt + pkt->rm_pktinfooffset + pkt->rm_pktinfolen); pkt->rm_pktinfolen += pi_size; pi->rm_size = pi_size; pi->rm_type = pi_type; pi->rm_pktinfooffset = RNDIS_PKTINFO_OFFSET; /* Data immediately follow per-packet-info. */ pkt->rm_dataoffset += pi_size; /* Update RNDIS packet msg length */ pkt->rm_len += pi_size; return (pi->rm_data); } static __inline int hn_flush_txagg(struct ifnet *ifp, struct hn_tx_ring *txr) { struct hn_txdesc *txd; struct mbuf *m; int error, pkts; txd = txr->hn_agg_txd; KASSERT(txd != NULL, ("no aggregate txdesc")); /* * Since hn_txpkt() will reset this temporary stat, save * it now, so that oerrors can be updated properly, if * hn_txpkt() ever fails. */ pkts = txr->hn_stat_pkts; /* * Since txd's mbuf will _not_ be freed upon hn_txpkt() * failure, save it for later freeing, if hn_txpkt() ever * fails. */ m = txd->m; error = hn_txpkt(ifp, txr, txd); if (__predict_false(error)) { /* txd is freed, but m is not. */ m_freem(m); txr->hn_flush_failed++; if_inc_counter(ifp, IFCOUNTER_OERRORS, pkts); } /* Reset all aggregation states. */ txr->hn_agg_txd = NULL; txr->hn_agg_szleft = 0; txr->hn_agg_pktleft = 0; txr->hn_agg_prevpkt = NULL; return (error); } static void * hn_try_txagg(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd, int pktsize) { void *chim; if (txr->hn_agg_txd != NULL) { if (txr->hn_agg_pktleft >= 1 && txr->hn_agg_szleft > pktsize) { struct hn_txdesc *agg_txd = txr->hn_agg_txd; struct rndis_packet_msg *pkt = txr->hn_agg_prevpkt; int olen; /* * Update the previous RNDIS packet's total length, * it can be increased due to the mandatory alignment * padding for this RNDIS packet. And update the * aggregating txdesc's chimney sending buffer size * accordingly. * * XXX * Zero-out the padding, as required by the RNDIS spec. */ olen = pkt->rm_len; pkt->rm_len = roundup2(olen, txr->hn_agg_align); agg_txd->chim_size += pkt->rm_len - olen; /* Link this txdesc to the parent. */ hn_txdesc_agg(agg_txd, txd); chim = (uint8_t *)pkt + pkt->rm_len; /* Save the current packet for later fixup. */ txr->hn_agg_prevpkt = chim; txr->hn_agg_pktleft--; txr->hn_agg_szleft -= pktsize; if (txr->hn_agg_szleft <= HN_PKTSIZE_MIN(txr->hn_agg_align)) { /* * Probably can't aggregate more packets, * flush this aggregating txdesc proactively. */ txr->hn_agg_pktleft = 0; } /* Done! 
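 * (Editor's sketch of the resulting chimney buffer layout, given
 * the roundup2() alignment applied above:
 *
 *	|<-- rm_len0 -->|pad|<-- rm_len1 -->|pad| ...
 *	+---------------+---+---------------+---+----
 *	| RNDIS pkt 0   |   | RNDIS pkt 1   |   | ...
 *	+---------------+---+---------------+---+----
 *
 * Each pad is roundup2(rm_len, txr->hn_agg_align) - rm_len bytes
 * and is charged to the aggregating txdesc's chim_size.)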
*/ return (chim); } hn_flush_txagg(ifp, txr); } KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc")); txr->hn_tx_chimney_tried++; txd->chim_index = hn_chim_alloc(txr->hn_sc); if (txd->chim_index == HN_NVS_CHIM_IDX_INVALID) return (NULL); txr->hn_tx_chimney++; chim = txr->hn_sc->hn_chim + (txd->chim_index * txr->hn_sc->hn_chim_szmax); if (txr->hn_agg_pktmax > 1 && txr->hn_agg_szmax > pktsize + HN_PKTSIZE_MIN(txr->hn_agg_align)) { txr->hn_agg_txd = txd; txr->hn_agg_pktleft = txr->hn_agg_pktmax - 1; txr->hn_agg_szleft = txr->hn_agg_szmax - pktsize; txr->hn_agg_prevpkt = chim; } return (chim); } /* * NOTE: * If this function fails, then both txd and m_head0 will be freed. */ static int hn_encap(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0) { bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX]; int error, nsegs, i; struct mbuf *m_head = *m_head0; struct rndis_packet_msg *pkt; uint32_t *pi_data; void *chim = NULL; int pkt_hlen, pkt_size; pkt = txd->rndis_pkt; pkt_size = HN_PKTSIZE(m_head, txr->hn_agg_align); if (pkt_size < txr->hn_chim_size) { chim = hn_try_txagg(ifp, txr, txd, pkt_size); if (chim != NULL) pkt = chim; } else { if (txr->hn_agg_txd != NULL) hn_flush_txagg(ifp, txr); } pkt->rm_type = REMOTE_NDIS_PACKET_MSG; pkt->rm_len = sizeof(*pkt) + m_head->m_pkthdr.len; pkt->rm_dataoffset = sizeof(*pkt); pkt->rm_datalen = m_head->m_pkthdr.len; pkt->rm_oobdataoffset = 0; pkt->rm_oobdatalen = 0; pkt->rm_oobdataelements = 0; pkt->rm_pktinfooffset = sizeof(*pkt); pkt->rm_pktinfolen = 0; pkt->rm_vchandle = 0; pkt->rm_reserved = 0; if (txr->hn_tx_flags & HN_TX_FLAG_HASHVAL) { /* * Set the hash value for this packet, so that the host could * dispatch the TX done event for this packet back to this TX * ring's channel. 
*/ pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN, HN_NDIS_HASH_VALUE_SIZE, HN_NDIS_PKTINFO_TYPE_HASHVAL); *pi_data = txr->hn_tx_idx; } if (m_head->m_flags & M_VLANTAG) { pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN, NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN); *pi_data = NDIS_VLAN_INFO_MAKE( EVL_VLANOFTAG(m_head->m_pkthdr.ether_vtag), EVL_PRIOFTAG(m_head->m_pkthdr.ether_vtag), EVL_CFIOFTAG(m_head->m_pkthdr.ether_vtag)); } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { #if defined(INET6) || defined(INET) pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN, NDIS_LSO2_INFO_SIZE, NDIS_PKTINFO_TYPE_LSO); #ifdef INET if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) { *pi_data = NDIS_LSO2_INFO_MAKEIPV4(0, m_head->m_pkthdr.tso_segsz); } #endif #if defined(INET6) && defined(INET) else #endif #ifdef INET6 { *pi_data = NDIS_LSO2_INFO_MAKEIPV6(0, m_head->m_pkthdr.tso_segsz); } #endif #endif /* INET6 || INET */ } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) { pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN, NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM); if (m_head->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP)) { *pi_data = NDIS_TXCSUM_INFO_IPV6; } else { *pi_data = NDIS_TXCSUM_INFO_IPV4; if (m_head->m_pkthdr.csum_flags & CSUM_IP) *pi_data |= NDIS_TXCSUM_INFO_IPCS; } if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) *pi_data |= NDIS_TXCSUM_INFO_TCPCS; else if (m_head->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) *pi_data |= NDIS_TXCSUM_INFO_UDPCS; } pkt_hlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen; /* Convert RNDIS packet message offsets */ pkt->rm_dataoffset = hn_rndis_pktmsg_offset(pkt->rm_dataoffset); pkt->rm_pktinfooffset = hn_rndis_pktmsg_offset(pkt->rm_pktinfooffset); /* * Fast path: Chimney sending. */ if (chim != NULL) { struct hn_txdesc *tgt_txd = txd; if (txr->hn_agg_txd != NULL) { tgt_txd = txr->hn_agg_txd; #ifdef INVARIANTS *m_head0 = NULL; #endif } KASSERT(pkt == chim, ("RNDIS pkt not in chimney sending buffer")); KASSERT(tgt_txd->chim_index != HN_NVS_CHIM_IDX_INVALID, ("chimney sending buffer is not used")); tgt_txd->chim_size += pkt->rm_len; m_copydata(m_head, 0, m_head->m_pkthdr.len, ((uint8_t *)chim) + pkt_hlen); txr->hn_gpa_cnt = 0; txr->hn_sendpkt = hn_txpkt_chim; goto done; } KASSERT(txr->hn_agg_txd == NULL, ("aggregating sglist txdesc")); KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID, ("chimney buffer is used")); KASSERT(pkt == txd->rndis_pkt, ("RNDIS pkt not in txdesc")); error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs); if (__predict_false(error)) { int freed; /* * This mbuf is not linked w/ the txd yet, so free it now. */ m_freem(m_head); *m_head0 = NULL; freed = hn_txdesc_put(txr, txd); KASSERT(freed != 0, ("fail to free txd upon txdma error")); txr->hn_txdma_failed++; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return error; } *m_head0 = m_head; /* +1 RNDIS packet message */ txr->hn_gpa_cnt = nsegs + 1; /* send packet with page buffer */ txr->hn_gpa[0].gpa_page = atop(txd->rndis_pkt_paddr); txr->hn_gpa[0].gpa_ofs = txd->rndis_pkt_paddr & PAGE_MASK; txr->hn_gpa[0].gpa_len = pkt_hlen; /* * Fill the page buffers with mbuf info after the page * buffer for RNDIS packet message. 
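 * The resulting scatter/gather array is laid out as follows
 * (editor's sketch):
 *
 *	hn_gpa[0]        -> RNDIS packet message, pkt_hlen bytes
 *	hn_gpa[1..nsegs] -> DMA segments of the mbuf chain
 *
 * which is why hn_gpa_cnt is set to nsegs + 1 above.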
*/ for (i = 0; i < nsegs; ++i) { struct vmbus_gpa *gpa = &txr->hn_gpa[i + 1]; gpa->gpa_page = atop(segs[i].ds_addr); gpa->gpa_ofs = segs[i].ds_addr & PAGE_MASK; gpa->gpa_len = segs[i].ds_len; } txd->chim_index = HN_NVS_CHIM_IDX_INVALID; txd->chim_size = 0; txr->hn_sendpkt = hn_txpkt_sglist; done: txd->m = m_head; /* Set the completion routine */ hn_nvs_sendctx_init(&txd->send_ctx, hn_txpkt_done, txd); /* Update temporary stats for later use. */ txr->hn_stat_pkts++; txr->hn_stat_size += m_head->m_pkthdr.len; if (m_head->m_flags & M_MCAST) txr->hn_stat_mcasts++; return 0; } /* * NOTE: * If this function fails, then txd will be freed, but the mbuf * associated w/ the txd will _not_ be freed. */ static int hn_txpkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd) { int error, send_failed = 0, has_bpf; again: has_bpf = bpf_peers_present(ifp->if_bpf); if (has_bpf) { /* * Make sure that this txd and any aggregated txds are not * freed before ETHER_BPF_MTAP. */ hn_txdesc_hold(txd); } error = txr->hn_sendpkt(txr, txd); if (!error) { if (has_bpf) { const struct hn_txdesc *tmp_txd; ETHER_BPF_MTAP(ifp, txd->m); STAILQ_FOREACH(tmp_txd, &txd->agg_list, agg_link) ETHER_BPF_MTAP(ifp, tmp_txd->m); } if_inc_counter(ifp, IFCOUNTER_OPACKETS, txr->hn_stat_pkts); #ifdef HN_IFSTART_SUPPORT if (!hn_use_if_start) #endif { if_inc_counter(ifp, IFCOUNTER_OBYTES, txr->hn_stat_size); if (txr->hn_stat_mcasts != 0) { if_inc_counter(ifp, IFCOUNTER_OMCASTS, txr->hn_stat_mcasts); } } txr->hn_pkts += txr->hn_stat_pkts; txr->hn_sends++; } if (has_bpf) hn_txdesc_put(txr, txd); if (__predict_false(error)) { int freed; /* * This should "really rarely" happen. * * XXX Too many RX to be acked or too many sideband * commands to run? Ask netvsc_channel_rollup() * to kick start later. */ txr->hn_has_txeof = 1; if (!send_failed) { txr->hn_send_failed++; send_failed = 1; /* * Try sending again after set hn_has_txeof; * in case that we missed the last * netvsc_channel_rollup(). */ goto again; } if_printf(ifp, "send failed\n"); /* * Caller will perform further processing on the * associated mbuf, so don't free it in hn_txdesc_put(); * only unload it from the DMA map in hn_txdesc_put(), * if it was loaded. */ txd->m = NULL; freed = hn_txdesc_put(txr, txd); KASSERT(freed != 0, ("fail to free txd upon send error")); txr->hn_send_failed++; } /* Reset temporary stats, after this sending is done. */ txr->hn_stat_size = 0; txr->hn_stat_pkts = 0; txr->hn_stat_mcasts = 0; return (error); } /* * Append the specified data to the indicated mbuf chain, * Extend the mbuf chain if the new data does not fit in * existing space. * * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c. * There should be an equivalent in the kernel mbuf code, * but there does not appear to be one yet. * * Differs from m_append() in that additional mbufs are * allocated with cluster size MJUMPAGESIZE, and filled * accordingly. * * Return 1 if able to complete the job; otherwise 0. */ static int hv_m_append(struct mbuf *m0, int len, c_caddr_t cp) { struct mbuf *m, *n; int remainder, space; for (m = m0; m->m_next != NULL; m = m->m_next) ; remainder = len; space = M_TRAILINGSPACE(m); if (space > 0) { /* * Copy into available space. */ if (space > remainder) space = remainder; bcopy(cp, mtod(m, caddr_t) + m->m_len, space); m->m_len += space; cp += space; remainder -= space; } while (remainder > 0) { /* * Allocate a new mbuf; could check space * and allocate a cluster instead. 
*/ n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE); if (n == NULL) break; n->m_len = min(MJUMPAGESIZE, remainder); bcopy(cp, mtod(n, caddr_t), n->m_len); cp += n->m_len; remainder -= n->m_len; m->m_next = n; m = n; } if (m0->m_flags & M_PKTHDR) m0->m_pkthdr.len += len - remainder; return (remainder == 0); } #if defined(INET) || defined(INET6) static __inline int hn_lro_rx(struct lro_ctrl *lc, struct mbuf *m) { #if __FreeBSD_version >= 1100095 if (hn_lro_mbufq_depth) { tcp_lro_queue_mbuf(lc, m); return 0; } #endif return tcp_lro_rx(lc, m, 0); } #endif static int hn_rxpkt(struct hn_rx_ring *rxr, const void *data, int dlen, const struct hn_rxinfo *info) { - struct ifnet *ifp = rxr->hn_ifp; + struct ifnet *ifp; struct mbuf *m_new; int size, do_lro = 0, do_csum = 1; int hash_type; - if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - return (0); + /* If the VF is active, inject the packet through the VF */ + ifp = rxr->hn_vf ? rxr->hn_vf : rxr->hn_ifp; - /* - * Bail out if packet contains more data than configured MTU. - */ - if (dlen > (ifp->if_mtu + ETHER_HDR_LEN)) { - return (0); - } else if (dlen <= MHLEN) { + if (dlen <= MHLEN) { m_new = m_gethdr(M_NOWAIT, MT_DATA); if (m_new == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (0); } memcpy(mtod(m_new, void *), data, dlen); m_new->m_pkthdr.len = m_new->m_len = dlen; rxr->hn_small_pkts++; } else { /* * Get an mbuf with a cluster. For packets 2K or less, * get a standard 2K cluster. For anything larger, get a * 4K cluster. Any buffers larger than 4K can cause problems * if looped around to the Hyper-V TX channel, so avoid them. */ size = MCLBYTES; if (dlen > MCLBYTES) { /* 4096 */ size = MJUMPAGESIZE; } m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size); if (m_new == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (0); } hv_m_append(m_new, dlen, data); } m_new->m_pkthdr.rcvif = ifp; if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0)) do_csum = 0; /* receive side checksum offload */ if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) { /* IP csum offload */ if ((info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK) && do_csum) { m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); rxr->hn_csum_ip++; } /* TCP/UDP csum offload */ if ((info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK | NDIS_RXCSUM_INFO_TCPCS_OK)) && do_csum) { m_new->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; if (info->csum_info & NDIS_RXCSUM_INFO_TCPCS_OK) rxr->hn_csum_tcp++; else rxr->hn_csum_udp++; } /* * XXX * As of this writing (Oct 28th, 2016), host side will turn * on only TCPCS_OK and IPCS_OK even for UDP datagrams, so * the do_lro setting here is actually _not_ accurate. We * depend on the RSS hash type check to reset do_lro.
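 * For example (editor's note), a UDP-over-IPv4 datagram may arrive
 * with TCPCS_OK | IPCS_OK set, turning on do_lro here; the RSS hash
 * type for such a flow is NDIS_HASH_IPV4 rather than
 * NDIS_HASH_TCP_IPV4, so do_lro is turned back off in the hash type
 * switch below.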
*/ if ((info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_OK | NDIS_RXCSUM_INFO_IPCS_OK)) == (NDIS_RXCSUM_INFO_TCPCS_OK | NDIS_RXCSUM_INFO_IPCS_OK)) do_lro = 1; } else { const struct ether_header *eh; uint16_t etype; int hoff; hoff = sizeof(*eh); if (m_new->m_len < hoff) goto skip; eh = mtod(m_new, struct ether_header *); etype = ntohs(eh->ether_type); if (etype == ETHERTYPE_VLAN) { const struct ether_vlan_header *evl; hoff = sizeof(*evl); if (m_new->m_len < hoff) goto skip; evl = mtod(m_new, struct ether_vlan_header *); etype = ntohs(evl->evl_proto); } if (etype == ETHERTYPE_IP) { int pr; pr = hn_check_iplen(m_new, hoff); if (pr == IPPROTO_TCP) { if (do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_TCP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; } do_lro = 1; } else if (pr == IPPROTO_UDP) { if (do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_UDP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; } } else if (pr != IPPROTO_DONE && do_csum && (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) { rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); } } } skip: if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { m_new->m_pkthdr.ether_vtag = EVL_MAKETAG( NDIS_VLAN_INFO_ID(info->vlan_info), NDIS_VLAN_INFO_PRI(info->vlan_info), NDIS_VLAN_INFO_CFI(info->vlan_info)); m_new->m_flags |= M_VLANTAG; } if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) { rxr->hn_rss_pkts++; m_new->m_pkthdr.flowid = info->hash_value; hash_type = M_HASHTYPE_OPAQUE_HASH; if ((info->hash_info & NDIS_HASH_FUNCTION_MASK) == NDIS_HASH_FUNCTION_TOEPLITZ) { uint32_t type = (info->hash_info & NDIS_HASH_TYPE_MASK); /* * NOTE: * do_lro is reset if the hash type is not TCP * related. See the comment in the above csum_flags * setup section. */ switch (type) { case NDIS_HASH_IPV4: hash_type = M_HASHTYPE_RSS_IPV4; do_lro = 0; break; case NDIS_HASH_TCP_IPV4: hash_type = M_HASHTYPE_RSS_TCP_IPV4; break; case NDIS_HASH_IPV6: hash_type = M_HASHTYPE_RSS_IPV6; do_lro = 0; break; case NDIS_HASH_IPV6_EX: hash_type = M_HASHTYPE_RSS_IPV6_EX; do_lro = 0; break; case NDIS_HASH_TCP_IPV6: hash_type = M_HASHTYPE_RSS_TCP_IPV6; break; case NDIS_HASH_TCP_IPV6_EX: hash_type = M_HASHTYPE_RSS_TCP_IPV6_EX; break; } } } else { m_new->m_pkthdr.flowid = rxr->hn_rx_idx; hash_type = M_HASHTYPE_OPAQUE; } M_HASHTYPE_SET(m_new, hash_type); /* * Note: Moved RX completion back to hv_nv_on_receive() so all * messages (not just data messages) will trigger a response. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); rxr->hn_pkts++; if ((ifp->if_capenable & IFCAP_LRO) && do_lro) { #if defined(INET) || defined(INET6) struct lro_ctrl *lro = &rxr->hn_lro; if (lro->lro_cnt) { rxr->hn_lro_tried++; if (hn_lro_rx(lro, m_new) == 0) { /* DONE!
*/ return 0; } } #endif } /* We're not holding the lock here, so don't release it */ (*ifp->if_input)(ifp, m_new); return (0); } static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct hn_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int mask, error = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu > HN_MTU_MAX) { error = EINVAL; break; } HN_LOCK(sc); if ((sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) == 0) { HN_UNLOCK(sc); break; } if ((sc->hn_caps & HN_CAP_MTU) == 0) { /* Can't change MTU */ HN_UNLOCK(sc); error = EOPNOTSUPP; break; } if (ifp->if_mtu == ifr->ifr_mtu) { HN_UNLOCK(sc); break; } /* * Suspend this interface before the synthetic parts * are ripped. */ hn_suspend(sc); /* * Detach the synthetic parts, i.e. NVS and RNDIS. */ hn_synth_detach(sc); /* * Reattach the synthetic parts, i.e. NVS and RNDIS, * with the new MTU setting. */ error = hn_synth_attach(sc, ifr->ifr_mtu); if (error) { HN_UNLOCK(sc); break; } /* * Commit the requested MTU, after the synthetic parts * have been successfully attached. */ ifp->if_mtu = ifr->ifr_mtu; /* * Make sure that various parameters based on MTU are * still valid, after the MTU change. */ if (sc->hn_tx_ring[0].hn_chim_size > sc->hn_chim_szmax) hn_set_chim_size(sc, sc->hn_chim_szmax); hn_set_tso_maxsize(sc, hn_tso_maxlen, ifp->if_mtu); #if __FreeBSD_version >= 1100099 if (sc->hn_rx_ring[0].hn_lro.lro_length_lim < HN_LRO_LENLIM_MIN(ifp)) hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MIN(ifp)); #endif /* * All done! Resume the interface now. */ hn_resume(sc); HN_UNLOCK(sc); break; case SIOCSIFFLAGS: HN_LOCK(sc); if ((sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) == 0) { HN_UNLOCK(sc); break; } if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * Caller might hold a mutex, e.g. * bpf; use busy-wait for the RNDIS * reply. */ HN_NO_SLEEPING(sc); hn_rxfilter_config(sc); HN_SLEEPING_OK(sc); } else { hn_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) - hn_stop(sc); + hn_stop(sc, false); } sc->hn_if_flags = ifp->if_flags; HN_UNLOCK(sc); break; case SIOCSIFCAP: HN_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= HN_CSUM_IP_HWASSIST(sc); else ifp->if_hwassist &= ~HN_CSUM_IP_HWASSIST(sc); } if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= HN_CSUM_IP6_HWASSIST(sc); else ifp->if_hwassist &= ~HN_CSUM_IP6_HWASSIST(sc); } /* TODO: flip RNDIS offload parameters for RXCSUM. */ if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; #ifdef foo /* We can't diff IPv6 packets from IPv4 packets on RX path. */ if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; #endif if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; else ifp->if_hwassist &= ~CSUM_IP_TSO; } if (mask & IFCAP_TSO6) { ifp->if_capenable ^= IFCAP_TSO6; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; else ifp->if_hwassist &= ~CSUM_IP6_TSO; } HN_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: HN_LOCK(sc); if ((sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) == 0) { HN_UNLOCK(sc); break; } if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * Multicast uses mutex; use busy-wait for * the RNDIS reply.
*/ HN_NO_SLEEPING(sc); hn_rxfilter_config(sc); HN_SLEEPING_OK(sc); } HN_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void -hn_stop(struct hn_softc *sc) +hn_stop(struct hn_softc *sc, bool detaching) { struct ifnet *ifp = sc->hn_ifp; int i; HN_LOCK_ASSERT(sc); KASSERT(sc->hn_flags & HN_FLAG_SYNTH_ATTACHED, ("synthetic parts were not attached")); /* Disable polling. */ hn_polling(sc, 0); /* Clear RUNNING bit _before_ hn_suspend_data() */ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_RUNNING); hn_suspend_data(sc); /* Clear OACTIVE bit. */ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); for (i = 0; i < sc->hn_tx_ring_inuse; ++i) sc->hn_tx_ring[i].hn_oactive = 0; + + /* + * If the VF is active, make sure the filter is not 0, even if + * the synthetic NIC is down. + */ + if (!detaching && (sc->hn_flags & HN_FLAG_VF)) + hn_rxfilter_config(sc); } static void hn_init_locked(struct hn_softc *sc) { struct ifnet *ifp = sc->hn_ifp; int i; HN_LOCK_ASSERT(sc); if ((sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) == 0) return; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; /* Configure RX filter */ hn_rxfilter_config(sc); /* Clear OACTIVE bit. */ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); for (i = 0; i < sc->hn_tx_ring_inuse; ++i) sc->hn_tx_ring[i].hn_oactive = 0; /* Clear TX 'suspended' bit. */ hn_resume_tx(sc, sc->hn_tx_ring_inuse); /* Everything is ready; unleash! */ atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING); /* Re-enable polling if requested. */ if (sc->hn_pollhz > 0) hn_polling(sc, sc->hn_pollhz); } static void hn_init(void *xsc) { struct hn_softc *sc = xsc; HN_LOCK(sc); hn_init_locked(sc); HN_UNLOCK(sc); } #if __FreeBSD_version >= 1100099 static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; unsigned int lenlim; int error; lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim; error = sysctl_handle_int(oidp, &lenlim, 0, req); if (error || req->newptr == NULL) return error; HN_LOCK(sc); if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) || lenlim > TCP_LRO_LENGTH_MAX) { HN_UNLOCK(sc); return EINVAL; } hn_set_lro_lenlim(sc, lenlim); HN_UNLOCK(sc); return 0; } static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ackcnt, error, i; /* * lro_ackcnt_lim is append count limit, * +1 to turn it into aggregation limit. */ ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1; error = sysctl_handle_int(oidp, &ackcnt, 0, req); if (error || req->newptr == NULL) return error; if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1)) return EINVAL; /* * Convert aggregation limit back to append * count limit. 
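 * E.g. a user-visible aggregation limit of 2 (aggregate at most two
 * ACKs) maps to lro_ackcnt_lim = 1 (append at most one ACK to the
 * one currently held).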
*/ --ackcnt; HN_LOCK(sc); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt; HN_UNLOCK(sc); return 0; } #endif static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int hcsum = arg2; int on, error, i; on = 0; if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum) on = 1; error = sysctl_handle_int(oidp, &on, 0, req); if (error || req->newptr == NULL) return error; HN_LOCK(sc); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; if (on) rxr->hn_trust_hcsum |= hcsum; else rxr->hn_trust_hcsum &= ~hcsum; } HN_UNLOCK(sc); return 0; } static int hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int chim_size, error; chim_size = sc->hn_tx_ring[0].hn_chim_size; error = sysctl_handle_int(oidp, &chim_size, 0, req); if (error || req->newptr == NULL) return error; if (chim_size > sc->hn_chim_szmax || chim_size <= 0) return EINVAL; HN_LOCK(sc); hn_set_chim_size(sc, chim_size); HN_UNLOCK(sc); return 0; } #if __FreeBSD_version < 1100095 static int hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_rx_ring *rxr; uint64_t stat; stat = 0; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; stat += *((int *)((uint8_t *)rxr + ofs)); } error = sysctl_handle_64(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. */ for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; *((int *)((uint8_t *)rxr + ofs)) = 0; } return 0; } #else static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_rx_ring *rxr; uint64_t stat; stat = 0; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; stat += *((uint64_t *)((uint8_t *)rxr + ofs)); } error = sysctl_handle_64(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. */ for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; *((uint64_t *)((uint8_t *)rxr + ofs)) = 0; } return 0; } #endif static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_rx_ring *rxr; u_long stat; stat = 0; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; stat += *((u_long *)((uint8_t *)rxr + ofs)); } error = sysctl_handle_long(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. */ for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { rxr = &sc->hn_rx_ring[i]; *((u_long *)((uint8_t *)rxr + ofs)) = 0; } return 0; } static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error; struct hn_tx_ring *txr; u_long stat; stat = 0; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { txr = &sc->hn_tx_ring[i]; stat += *((u_long *)((uint8_t *)txr + ofs)); } error = sysctl_handle_long(oidp, &stat, 0, req); if (error || req->newptr == NULL) return error; /* Zero out this stat. 
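 * (Editor's note: these aggregate stat sysctls are read-and-clear;
 * each read returns the sum over all rings and then resets the
 * per-ring counters, so consecutive reads report deltas rather
 * than monotonic totals.)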
*/ for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { txr = &sc->hn_tx_ring[i]; *((u_long *)((uint8_t *)txr + ofs)) = 0; } return 0; } static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int ofs = arg2, i, error, conf; struct hn_tx_ring *txr; txr = &sc->hn_tx_ring[0]; conf = *((int *)((uint8_t *)txr + ofs)); error = sysctl_handle_int(oidp, &conf, 0, req); if (error || req->newptr == NULL) return error; HN_LOCK(sc); for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { txr = &sc->hn_tx_ring[i]; *((int *)((uint8_t *)txr + ofs)) = conf; } HN_UNLOCK(sc); return 0; } static int hn_txagg_size_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int error, size; size = sc->hn_agg_size; error = sysctl_handle_int(oidp, &size, 0, req); if (error || req->newptr == NULL) return (error); HN_LOCK(sc); sc->hn_agg_size = size; hn_set_txagg(sc); HN_UNLOCK(sc); return (0); } static int hn_txagg_pkts_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int error, pkts; pkts = sc->hn_agg_pkts; error = sysctl_handle_int(oidp, &pkts, 0, req); if (error || req->newptr == NULL) return (error); HN_LOCK(sc); sc->hn_agg_pkts = pkts; hn_set_txagg(sc); HN_UNLOCK(sc); return (0); } static int hn_txagg_pktmax_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int pkts; pkts = sc->hn_tx_ring[0].hn_agg_pktmax; return (sysctl_handle_int(oidp, &pkts, 0, req)); } static int hn_txagg_align_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int align; align = sc->hn_tx_ring[0].hn_agg_align; return (sysctl_handle_int(oidp, &align, 0, req)); } static void hn_chan_polling(struct vmbus_channel *chan, u_int pollhz) { if (pollhz == 0) vmbus_chan_poll_disable(chan); else vmbus_chan_poll_enable(chan, pollhz); } static void hn_polling(struct hn_softc *sc, u_int pollhz) { int nsubch = sc->hn_rx_ring_inuse - 1; HN_LOCK_ASSERT(sc); if (nsubch > 0) { struct vmbus_channel **subch; int i; subch = vmbus_subchan_get(sc->hn_prichan, nsubch); for (i = 0; i < nsubch; ++i) hn_chan_polling(subch[i], pollhz); vmbus_subchan_rel(subch, nsubch); } hn_chan_polling(sc->hn_prichan, pollhz); } static int hn_polling_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int pollhz, error; pollhz = sc->hn_pollhz; error = sysctl_handle_int(oidp, &pollhz, 0, req); if (error || req->newptr == NULL) return (error); if (pollhz != 0 && (pollhz < VMBUS_CHAN_POLLHZ_MIN || pollhz > VMBUS_CHAN_POLLHZ_MAX)) return (EINVAL); HN_LOCK(sc); if (sc->hn_pollhz != pollhz) { sc->hn_pollhz = pollhz; if ((sc->hn_ifp->if_drv_flags & IFF_DRV_RUNNING) && (sc->hn_flags & HN_FLAG_SYNTH_ATTACHED)) hn_polling(sc, sc->hn_pollhz); } HN_UNLOCK(sc); return (0); } static int hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; char verstr[16]; snprintf(verstr, sizeof(verstr), "%u.%u", HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver), HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver)); return sysctl_handle_string(oidp, verstr, sizeof(verstr), req); } static int hn_caps_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; char caps_str[128]; uint32_t caps; HN_LOCK(sc); caps = sc->hn_caps; HN_UNLOCK(sc); snprintf(caps_str, sizeof(caps_str), "%b", caps, HN_CAP_BITS); return sysctl_handle_string(oidp, caps_str, sizeof(caps_str), req); } static int hn_hwassist_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; char assist_str[128]; uint32_t hwassist; HN_LOCK(sc); hwassist = sc->hn_ifp->if_hwassist; HN_UNLOCK(sc); snprintf(assist_str, sizeof(assist_str), "%b", hwassist, CSUM_BITS); return sysctl_handle_string(oidp, assist_str, sizeof(assist_str), 
req); } static int hn_rxfilter_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; char filter_str[128]; uint32_t filter; HN_LOCK(sc); filter = sc->hn_rx_filter; HN_UNLOCK(sc); snprintf(filter_str, sizeof(filter_str), "%b", filter, NDIS_PACKET_TYPES); return sysctl_handle_string(oidp, filter_str, sizeof(filter_str), req); } #ifndef RSS static int hn_rss_key_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int error; HN_LOCK(sc); error = SYSCTL_OUT(req, sc->hn_rss.rss_key, sizeof(sc->hn_rss.rss_key)); if (error || req->newptr == NULL) goto back; error = SYSCTL_IN(req, sc->hn_rss.rss_key, sizeof(sc->hn_rss.rss_key)); if (error) goto back; sc->hn_flags |= HN_FLAG_HAS_RSSKEY; if (sc->hn_rx_ring_inuse > 1) { error = hn_rss_reconfig(sc); } else { /* Not RSS capable, at least for now; just save the RSS key. */ error = 0; } back: HN_UNLOCK(sc); return (error); } static int hn_rss_ind_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int error; HN_LOCK(sc); error = SYSCTL_OUT(req, sc->hn_rss.rss_ind, sizeof(sc->hn_rss.rss_ind)); if (error || req->newptr == NULL) goto back; /* * Don't allow RSS indirect table change, if this interface is not * RSS capable currently. */ if (sc->hn_rx_ring_inuse == 1) { error = EOPNOTSUPP; goto back; } error = SYSCTL_IN(req, sc->hn_rss.rss_ind, sizeof(sc->hn_rss.rss_ind)); if (error) goto back; sc->hn_flags |= HN_FLAG_HAS_RSSIND; hn_rss_ind_fixup(sc); error = hn_rss_reconfig(sc); back: HN_UNLOCK(sc); return (error); } #endif /* !RSS */ static int hn_rss_hash_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; char hash_str[128]; uint32_t hash; HN_LOCK(sc); hash = sc->hn_rss_hash; HN_UNLOCK(sc); snprintf(hash_str, sizeof(hash_str), "%b", hash, NDIS_HASH_BITS); return sysctl_handle_string(oidp, hash_str, sizeof(hash_str), req); } static int +hn_vf_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct hn_softc *sc = arg1; + char vf_name[128]; + struct ifnet *vf; + + HN_LOCK(sc); + vf_name[0] = '\0'; + vf = sc->hn_rx_ring[0].hn_vf; + if (vf != NULL) + snprintf(vf_name, sizeof(vf_name), "%s", if_name(vf)); + HN_UNLOCK(sc); + return sysctl_handle_string(oidp, vf_name, sizeof(vf_name), req); +} + +static int hn_check_iplen(const struct mbuf *m, int hoff) { const struct ip *ip; int len, iphlen, iplen; const struct tcphdr *th; int thoff; /* TCP data offset */ len = hoff + sizeof(struct ip); /* The packet must be at least the size of an IP header. */ if (m->m_pkthdr.len < len) return IPPROTO_DONE; /* The fixed IP header must reside completely in the first mbuf. */ if (m->m_len < len) return IPPROTO_DONE; ip = mtodo(m, hoff); /* Bound check the packet's stated IP header length. */ iphlen = ip->ip_hl << 2; if (iphlen < sizeof(struct ip)) /* minimum header length */ return IPPROTO_DONE; /* The full IP header must reside completely in the one mbuf. */ if (m->m_len < hoff + iphlen) return IPPROTO_DONE; iplen = ntohs(ip->ip_len); /* * Check that the amount of data in the buffers is at * least as much as the IP header would have us expect. */ if (m->m_pkthdr.len < hoff + iplen) return IPPROTO_DONE; /* * Ignore IP fragments. */ if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF)) return IPPROTO_DONE; /* * The TCP/IP or UDP/IP header must be entirely contained within * the first fragment of a packet.
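 * For example (editor's note): with hoff = 14 (plain Ethernet),
 * iphlen = 20 and a TCP header claiming th_off = 5 (thoff = 20),
 * the stated IP length must be at least iphlen + thoff = 40 and
 * the first mbuf must hold at least hoff + iphlen + thoff = 54
 * bytes; anything shorter is rejected as IPPROTO_DONE.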
*/ switch (ip->ip_p) { case IPPROTO_TCP: if (iplen < iphlen + sizeof(struct tcphdr)) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + sizeof(struct tcphdr)) return IPPROTO_DONE; th = (const struct tcphdr *)((const uint8_t *)ip + iphlen); thoff = th->th_off << 2; if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + thoff) return IPPROTO_DONE; break; case IPPROTO_UDP: if (iplen < iphlen + sizeof(struct udphdr)) return IPPROTO_DONE; if (m->m_len < hoff + iphlen + sizeof(struct udphdr)) return IPPROTO_DONE; break; default: if (iplen < iphlen) return IPPROTO_DONE; break; } return ip->ip_p; } static int hn_create_rx_data(struct hn_softc *sc, int ring_cnt) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; device_t dev = sc->hn_dev; #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 int lroent_cnt; #endif #endif int i; /* * Create RXBUF for reception. * * NOTE: * - It is shared by all channels. * - A large enough buffer is allocated; certain versions of NVSes * may further limit the usable space. */ sc->hn_rxbuf = hyperv_dmamem_alloc(bus_get_dma_tag(dev), PAGE_SIZE, 0, HN_RXBUF_SIZE, &sc->hn_rxbuf_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); if (sc->hn_rxbuf == NULL) { device_printf(sc->hn_dev, "allocate rxbuf failed\n"); return (ENOMEM); } sc->hn_rx_ring_cnt = ring_cnt; sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt; sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt, M_DEVBUF, M_WAITOK | M_ZERO); #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 lroent_cnt = hn_lro_entry_count; if (lroent_cnt < TCP_LRO_ENTRIES) lroent_cnt = TCP_LRO_ENTRIES; if (bootverbose) device_printf(dev, "LRO: entry count %d\n", lroent_cnt); #endif #endif /* INET || INET6 */ ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); /* Create dev.hn.UNIT.rx sysctl tree */ sc->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "rx", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; rxr->hn_br = hyperv_dmamem_alloc(bus_get_dma_tag(dev), PAGE_SIZE, 0, HN_TXBR_SIZE + HN_RXBR_SIZE, &rxr->hn_br_dma, BUS_DMA_WAITOK); if (rxr->hn_br == NULL) { device_printf(dev, "allocate bufring failed\n"); return (ENOMEM); } if (hn_trust_hosttcp) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP; if (hn_trust_hostudp) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP; if (hn_trust_hostip) rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP; rxr->hn_ifp = sc->hn_ifp; if (i < sc->hn_tx_ring_cnt) rxr->hn_txr = &sc->hn_tx_ring[i]; rxr->hn_pktbuf_len = HN_PKTBUF_LEN_DEF; rxr->hn_pktbuf = malloc(rxr->hn_pktbuf_len, M_DEVBUF, M_WAITOK); rxr->hn_rx_idx = i; rxr->hn_rxbuf = sc->hn_rxbuf; /* * Initialize LRO. 
*/ #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, hn_lro_mbufq_depth); #else tcp_lro_init(&rxr->hn_lro); rxr->hn_lro.ifp = sc->hn_ifp; #endif #if __FreeBSD_version >= 1100099 rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF; rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF; #endif #endif /* INET || INET6 */ if (sc->hn_rx_sysctl_tree != NULL) { char name[16]; /* * Create per RX ring sysctl tree: * dev.hn.UNIT.rx.RINGID */ snprintf(name, sizeof(name), "%d", i); rxr->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->hn_rx_sysctl_tree), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); if (rxr->hn_rx_sysctl_tree != NULL) { SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree), OID_AUTO, "packets", CTLFLAG_RW, &rxr->hn_pkts, "# of packets received"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree), OID_AUTO, "rss_pkts", CTLFLAG_RW, &rxr->hn_rss_pkts, "# of packets w/ RSS info received"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree), OID_AUTO, "pktbuf_len", CTLFLAG_RD, &rxr->hn_pktbuf_len, 0, "Temporary channel packet buffer length"); } } } SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_lro.lro_queued), #if __FreeBSD_version < 1100095 hn_rx_stat_int_sysctl, #else hn_rx_stat_u64_sysctl, #endif "LU", "LRO queued"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_lro.lro_flushed), #if __FreeBSD_version < 1100095 hn_rx_stat_int_sysctl, #else hn_rx_stat_u64_sysctl, #endif "LU", "LRO flushed"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_lro_tried), hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries"); #if __FreeBSD_version >= 1100099 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_lro_lenlim_sysctl, "IU", "Max # of data bytes to be aggregated by LRO"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_lro_ackcnt_sysctl, "I", "Max # of ACKs to be aggregated by LRO"); #endif SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_TCP, hn_trust_hcsum_sysctl, "I", "Trust tcp segment verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_UDP, hn_trust_hcsum_sysctl, "I", "Trust udp datagram verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_IP, hn_trust_hcsum_sysctl, "I", "Trust ip packet verification on host side, " "when csum info is missing"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_csum_ip), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_csum_tcp), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_csum_udp), hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP"); 
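/* * The per-ring RX statistic nodes here share one handler convention: arg2 carries a byte offset (__offsetof()) into struct hn_rx_ring, and the handler walks every ring, reaching the field through pointer arithmetic -- the same pattern hn_tx_conf_int_sysctl() uses on the TX side. A minimal sketch of such a read-side handler (variable names illustrative, assuming a u_long field): * *	u_long total = 0; *	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { *		rxr = &sc->hn_rx_ring[i]; *		total += *((u_long *)((uint8_t *)rxr + ofs)); *	} *	error = sysctl_handle_long(oidp, &total, 0, req); */ 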
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_csum_trusted), hn_rx_stat_ulong_sysctl, "LU", "# of packets that we trust host's csum verification"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_small_pkts), hn_rx_stat_ulong_sysctl, "LU", "# of small packets received"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_ack_failed", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_rx_ring, hn_ack_failed), hn_rx_stat_ulong_sysctl, "LU", "# of RXBUF ack failures"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# created RX rings"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse", CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# used RX rings"); return (0); } static void hn_destroy_rx_data(struct hn_softc *sc) { int i; if (sc->hn_rxbuf != NULL) { if ((sc->hn_flags & HN_FLAG_RXBUF_REF) == 0) hyperv_dmamem_free(&sc->hn_rxbuf_dma, sc->hn_rxbuf); else device_printf(sc->hn_dev, "RXBUF is referenced\n"); sc->hn_rxbuf = NULL; } if (sc->hn_rx_ring_cnt == 0) return; for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; if (rxr->hn_br == NULL) continue; if ((rxr->hn_rx_flags & HN_RX_FLAG_BR_REF) == 0) { hyperv_dmamem_free(&rxr->hn_br_dma, rxr->hn_br); } else { device_printf(sc->hn_dev, "%dth channel bufring is referenced", i); } rxr->hn_br = NULL; #if defined(INET) || defined(INET6) tcp_lro_free(&rxr->hn_lro); #endif free(rxr->hn_pktbuf, M_DEVBUF); } free(sc->hn_rx_ring, M_DEVBUF); sc->hn_rx_ring = NULL; sc->hn_rx_ring_cnt = 0; sc->hn_rx_ring_inuse = 0; } static int hn_tx_ring_create(struct hn_softc *sc, int id) { struct hn_tx_ring *txr = &sc->hn_tx_ring[id]; device_t dev = sc->hn_dev; bus_dma_tag_t parent_dtag; int error, i; txr->hn_sc = sc; txr->hn_tx_idx = id; #ifndef HN_USE_TXDESC_BUFRING mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN); #endif mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF); txr->hn_txdesc_cnt = HN_TX_DESC_CNT; txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt, M_DEVBUF, M_WAITOK | M_ZERO); #ifndef HN_USE_TXDESC_BUFRING SLIST_INIT(&txr->hn_txlist); #else txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_DEVBUF, M_WAITOK, &txr->hn_tx_lock); #endif if (hn_tx_taskq_mode == HN_TX_TASKQ_M_EVTTQ) { txr->hn_tx_taskq = VMBUS_GET_EVENT_TASKQ( device_get_parent(dev), dev, HN_RING_IDX2CPU(sc, id)); } else { txr->hn_tx_taskq = sc->hn_tx_taskqs[id % hn_tx_taskq_cnt]; } #ifdef HN_IFSTART_SUPPORT if (hn_use_if_start) { txr->hn_txeof = hn_start_txeof; TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr); TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr); } else #endif { int br_depth; txr->hn_txeof = hn_xmit_txeof; TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr); TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr); br_depth = hn_get_txswq_depth(txr); txr->hn_mbuf_br = buf_ring_alloc(br_depth, M_DEVBUF, M_WAITOK, &txr->hn_tx_lock); } txr->hn_direct_tx_size = hn_direct_tx_size; /* * Always schedule transmission instead of trying to do direct * transmission. This one gives the best performance so far. */ txr->hn_sched_tx = 1; parent_dtag = bus_get_dma_tag(dev); /* DMA tag for RNDIS packet messages. 
*/ error = bus_dma_tag_create(parent_dtag, /* parent */ HN_RNDIS_PKT_ALIGN, /* alignment */ HN_RNDIS_PKT_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HN_RNDIS_PKT_LEN, /* maxsize */ 1, /* nsegments */ HN_RNDIS_PKT_LEN, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->hn_tx_rndis_dtag); if (error) { device_printf(dev, "failed to create rndis dmatag\n"); return error; } /* DMA tag for data. */ error = bus_dma_tag_create(parent_dtag, /* parent */ 1, /* alignment */ HN_TX_DATA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HN_TX_DATA_MAXSIZE, /* maxsize */ HN_TX_DATA_SEGCNT_MAX, /* nsegments */ HN_TX_DATA_SEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->hn_tx_data_dtag); if (error) { device_printf(dev, "failed to create data dmatag\n"); return error; } for (i = 0; i < txr->hn_txdesc_cnt; ++i) { struct hn_txdesc *txd = &txr->hn_txdesc[i]; txd->txr = txr; txd->chim_index = HN_NVS_CHIM_IDX_INVALID; STAILQ_INIT(&txd->agg_list); /* * Allocate and load RNDIS packet message. */ error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag, (void **)&txd->rndis_pkt, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &txd->rndis_pkt_dmap); if (error) { device_printf(dev, "failed to allocate rndis_packet_msg, %d\n", i); return error; } error = bus_dmamap_load(txr->hn_tx_rndis_dtag, txd->rndis_pkt_dmap, txd->rndis_pkt, HN_RNDIS_PKT_LEN, hyperv_dma_map_paddr, &txd->rndis_pkt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "failed to load rndis_packet_msg, %d\n", i); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_pkt, txd->rndis_pkt_dmap); return error; } /* DMA map for TX data. 
*/ error = bus_dmamap_create(txr->hn_tx_data_dtag, 0, &txd->data_dmap); if (error) { device_printf(dev, "failed to allocate tx data dmamap\n"); bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_pkt_dmap); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_pkt, txd->rndis_pkt_dmap); return error; } /* All set, put it to list */ txd->flags |= HN_TXD_FLAG_ONLIST; #ifndef HN_USE_TXDESC_BUFRING SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); #else buf_ring_enqueue(txr->hn_txdesc_br, txd); #endif } txr->hn_txdesc_avail = txr->hn_txdesc_cnt; if (sc->hn_tx_sysctl_tree != NULL) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; char name[16]; /* * Create per TX ring sysctl tree: * dev.hn.UNIT.tx.RINGID */ ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree); snprintf(name, sizeof(name), "%d", id); txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); if (txr->hn_tx_sysctl_tree != NULL) { child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree); #ifdef HN_DEBUG SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail", CTLFLAG_RD, &txr->hn_txdesc_avail, 0, "# of available TX descs"); #endif #ifdef HN_IFSTART_SUPPORT if (!hn_use_if_start) #endif { SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive", CTLFLAG_RD, &txr->hn_oactive, 0, "over active"); } SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets", CTLFLAG_RW, &txr->hn_pkts, "# of packets transmitted"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "sends", CTLFLAG_RW, &txr->hn_sends, "# of sends"); } } return 0; } static void hn_txdesc_dmamap_destroy(struct hn_txdesc *txd) { struct hn_tx_ring *txr = txd->txr; KASSERT(txd->m == NULL, ("still has mbuf installed")); KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped")); bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_pkt_dmap); bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_pkt, txd->rndis_pkt_dmap); bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap); } static void hn_txdesc_gc(struct hn_tx_ring *txr, struct hn_txdesc *txd) { KASSERT(txd->refs == 0 || txd->refs == 1, ("invalid txd refs %d", txd->refs)); /* Aggregated txds will be freed by their aggregating txd. */ if (txd->refs > 0 && (txd->flags & HN_TXD_FLAG_ONAGG) == 0) { int freed; freed = hn_txdesc_put(txr, txd); KASSERT(freed, ("can't free txdesc")); } } static void hn_tx_ring_destroy(struct hn_tx_ring *txr) { int i; if (txr->hn_txdesc == NULL) return; /* * NOTE: * Because the freeing of aggregated txds will be deferred * to the aggregating txd, two passes are used here: * - The first pass GCes any pending txds. This GC is necessary, * since if the channels are revoked, hypervisor will not * deliver send-done for all pending txds. * - The second pass frees the busdma stuffs, i.e. after all txds * were freed. 
*/ for (i = 0; i < txr->hn_txdesc_cnt; ++i) hn_txdesc_gc(txr, &txr->hn_txdesc[i]); for (i = 0; i < txr->hn_txdesc_cnt; ++i) hn_txdesc_dmamap_destroy(&txr->hn_txdesc[i]); if (txr->hn_tx_data_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_data_dtag); if (txr->hn_tx_rndis_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_rndis_dtag); #ifdef HN_USE_TXDESC_BUFRING buf_ring_free(txr->hn_txdesc_br, M_DEVBUF); #endif free(txr->hn_txdesc, M_DEVBUF); txr->hn_txdesc = NULL; if (txr->hn_mbuf_br != NULL) buf_ring_free(txr->hn_mbuf_br, M_DEVBUF); #ifndef HN_USE_TXDESC_BUFRING mtx_destroy(&txr->hn_txlist_spin); #endif mtx_destroy(&txr->hn_tx_lock); } static int hn_create_tx_data(struct hn_softc *sc, int ring_cnt) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; int i; /* * Create TXBUF for chimney sending. * * NOTE: It is shared by all channels. */ sc->hn_chim = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev), PAGE_SIZE, 0, HN_CHIM_SIZE, &sc->hn_chim_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); if (sc->hn_chim == NULL) { device_printf(sc->hn_dev, "allocate txbuf failed\n"); return (ENOMEM); } sc->hn_tx_ring_cnt = ring_cnt; sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt; sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt, M_DEVBUF, M_WAITOK | M_ZERO); ctx = device_get_sysctl_ctx(sc->hn_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev)); /* Create dev.hn.UNIT.tx sysctl tree */ sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { int error; error = hn_tx_ring_create(sc, i); if (error) return error; } SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_no_txdescs), hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_send_failed), hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failure"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_txdma_failed), hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failure"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_flush_failed", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_flush_failed), hn_tx_stat_ulong_sysctl, "LU", "# of packet transmission aggregation flush failure"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_tx_collapsed), hn_tx_stat_ulong_sysctl, "LU", "# of TX mbuf collapsed"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_tx_chimney), hn_tx_stat_ulong_sysctl, "LU", "# of chimney send"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_tried", CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_tx_chimney_tried), hn_tx_stat_ulong_sysctl, "LU", "# of chimney send tries"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt", CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0, "# of total TX descs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max", CTLFLAG_RD, &sc->hn_chim_szmax, 0, "Chimney send packet size upper boundary"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, hn_chim_size_sysctl, "I", "Chimney send packet size limit"); 
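/* * The "direct_tx_size" and "sched_tx" knobs registered below are per-ring settings exposed through a single node: hn_tx_conf_int_sysctl() (see above) fans a written value out to every TX ring under HN_LOCK. E.g., from userland (unit number and value illustrative): * *	sysctl dev.hn.0.direct_tx_size=128 */ 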
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_direct_tx_size), hn_tx_conf_int_sysctl, "I", "Size of the packet for direct transmission"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, __offsetof(struct hn_tx_ring, hn_sched_tx), hn_tx_conf_int_sysctl, "I", "Always schedule transmission " "instead of doing direct transmission"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "agg_szmax", CTLFLAG_RD, &sc->hn_tx_ring[0].hn_agg_szmax, 0, "Applied packet transmission aggregation size"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_pktmax", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, hn_txagg_pktmax_sysctl, "I", "Applied packet transmission aggregation packets"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_align", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, hn_txagg_align_sysctl, "I", "Applied packet transmission aggregation alignment"); return 0; } static void hn_set_chim_size(struct hn_softc *sc, int chim_size) { int i; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) sc->hn_tx_ring[i].hn_chim_size = chim_size; } static void hn_set_tso_maxsize(struct hn_softc *sc, int tso_maxlen, int mtu) { struct ifnet *ifp = sc->hn_ifp; int tso_minlen; if ((ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) == 0) return; KASSERT(sc->hn_ndis_tso_sgmin >= 2, ("invalid NDIS tso sgmin %d", sc->hn_ndis_tso_sgmin)); tso_minlen = sc->hn_ndis_tso_sgmin * mtu; KASSERT(sc->hn_ndis_tso_szmax >= tso_minlen && sc->hn_ndis_tso_szmax <= IP_MAXPACKET, ("invalid NDIS tso szmax %d", sc->hn_ndis_tso_szmax)); if (tso_maxlen < tso_minlen) tso_maxlen = tso_minlen; else if (tso_maxlen > IP_MAXPACKET) tso_maxlen = IP_MAXPACKET; if (tso_maxlen > sc->hn_ndis_tso_szmax) tso_maxlen = sc->hn_ndis_tso_szmax; ifp->if_hw_tsomax = tso_maxlen - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); if (bootverbose) if_printf(ifp, "TSO size max %u\n", ifp->if_hw_tsomax); } static void hn_fixup_tx_data(struct hn_softc *sc) { uint64_t csum_assist; int i; hn_set_chim_size(sc, sc->hn_chim_szmax); if (hn_tx_chimney_size > 0 && hn_tx_chimney_size < sc->hn_chim_szmax) hn_set_chim_size(sc, hn_tx_chimney_size); csum_assist = 0; if (sc->hn_caps & HN_CAP_IPCS) csum_assist |= CSUM_IP; if (sc->hn_caps & HN_CAP_TCP4CS) csum_assist |= CSUM_IP_TCP; if (sc->hn_caps & HN_CAP_UDP4CS) csum_assist |= CSUM_IP_UDP; if (sc->hn_caps & HN_CAP_TCP6CS) csum_assist |= CSUM_IP6_TCP; if (sc->hn_caps & HN_CAP_UDP6CS) csum_assist |= CSUM_IP6_UDP; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) sc->hn_tx_ring[i].hn_csum_assist = csum_assist; if (sc->hn_caps & HN_CAP_HASHVAL) { /* * Support HASHVAL pktinfo on TX path. 
*/ if (bootverbose) if_printf(sc->hn_ifp, "support HASHVAL pktinfo\n"); for (i = 0; i < sc->hn_tx_ring_cnt; ++i) sc->hn_tx_ring[i].hn_tx_flags |= HN_TX_FLAG_HASHVAL; } } static void hn_destroy_tx_data(struct hn_softc *sc) { int i; if (sc->hn_chim != NULL) { if ((sc->hn_flags & HN_FLAG_CHIM_REF) == 0) { hyperv_dmamem_free(&sc->hn_chim_dma, sc->hn_chim); } else { device_printf(sc->hn_dev, "chimney sending buffer is referenced"); } sc->hn_chim = NULL; } if (sc->hn_tx_ring_cnt == 0) return; for (i = 0; i < sc->hn_tx_ring_cnt; ++i) hn_tx_ring_destroy(&sc->hn_tx_ring[i]); free(sc->hn_tx_ring, M_DEVBUF); sc->hn_tx_ring = NULL; sc->hn_tx_ring_cnt = 0; sc->hn_tx_ring_inuse = 0; } #ifdef HN_IFSTART_SUPPORT static void hn_start_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); hn_start_locked(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static int hn_start_locked(struct hn_tx_ring *txr, int len) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; int sched = 0; KASSERT(hn_use_if_start, ("hn_start_locked is called when if_start is disabled")); KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); mtx_assert(&txr->hn_tx_lock, MA_OWNED); KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc")); if (__predict_false(txr->hn_suspended)) return (0); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (0); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { struct hn_txdesc *txd; struct mbuf *m_head; int error; IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (len > 0 && m_head->m_pkthdr.len > len) { /* * This sending could be time-consuming; let callers * dispatch this packet sending (and sending of any * follow-up packets) to the tx taskqueue. */ IFQ_DRV_PREPEND(&ifp->if_snd, m_head); sched = 1; break; } #if defined(INET6) || defined(INET) if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { m_head = hn_tso_fixup(m_head); if (__predict_false(m_head == NULL)) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); continue; } } #endif txd = hn_txdesc_get(txr); if (txd == NULL) { txr->hn_no_txdescs++; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } error = hn_encap(ifp, txr, txd, &m_head); if (error) { /* Both txd and m_head are freed */ KASSERT(txr->hn_agg_txd == NULL, ("encap failed w/ pending aggregating txdesc")); continue; } if (txr->hn_agg_pktleft == 0) { if (txr->hn_agg_txd != NULL) { KASSERT(m_head == NULL, ("pending mbuf for aggregating txdesc")); error = hn_flush_txagg(ifp, txr); if (__predict_false(error)) { atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } } else { KASSERT(m_head != NULL, ("mbuf was freed")); error = hn_txpkt(ifp, txr, txd); if (__predict_false(error)) { /* txd is freed, but m_head is not */ IFQ_DRV_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } } } #ifdef INVARIANTS else { KASSERT(txr->hn_agg_txd != NULL, ("no aggregating txdesc")); KASSERT(m_head == NULL, ("pending mbuf for aggregating txdesc")); } #endif } /* Flush pending aggregated transmission. 
*/ if (txr->hn_agg_txd != NULL) hn_flush_txagg(ifp, txr); return (sched); } static void hn_start(struct ifnet *ifp) { struct hn_softc *sc = ifp->if_softc; struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; sched = hn_start_locked(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (!sched) return; } do_sched: taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } static void hn_start_txeof_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE); hn_start_locked(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_start_txeof(struct hn_tx_ring *txr) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); sched = hn_start_locked(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (sched) { taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } } else { do_sched: /* * Release the OACTIVE earlier, with the hope that * others could catch up. The task will clear the * flag again with the hn_tx_lock to avoid possible * races. */ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); } } #endif /* HN_IFSTART_SUPPORT */ static int hn_xmit(struct hn_tx_ring *txr, int len) { struct hn_softc *sc = txr->hn_sc; struct ifnet *ifp = sc->hn_ifp; struct mbuf *m_head; int sched = 0; mtx_assert(&txr->hn_tx_lock, MA_OWNED); #ifdef HN_IFSTART_SUPPORT KASSERT(hn_use_if_start == 0, ("hn_xmit is called when if_start is enabled")); #endif KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc")); if (__predict_false(txr->hn_suspended)) return (0); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive) return (0); while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) { struct hn_txdesc *txd; int error; if (len > 0 && m_head->m_pkthdr.len > len) { /* * This sending could be time-consuming; let callers * dispatch this packet sending (and sending of any * follow-up packets) to the tx taskqueue. */ drbr_putback(ifp, txr->hn_mbuf_br, m_head); sched = 1; break; } txd = hn_txdesc_get(txr); if (txd == NULL) { txr->hn_no_txdescs++; drbr_putback(ifp, txr->hn_mbuf_br, m_head); txr->hn_oactive = 1; break; } error = hn_encap(ifp, txr, txd, &m_head); if (error) { /* Both txd and m_head are freed; discard */ KASSERT(txr->hn_agg_txd == NULL, ("encap failed w/ pending aggregating txdesc")); drbr_advance(ifp, txr->hn_mbuf_br); continue; } if (txr->hn_agg_pktleft == 0) { if (txr->hn_agg_txd != NULL) { KASSERT(m_head == NULL, ("pending mbuf for aggregating txdesc")); error = hn_flush_txagg(ifp, txr); if (__predict_false(error)) { txr->hn_oactive = 1; break; } } else { KASSERT(m_head != NULL, ("mbuf was freed")); error = hn_txpkt(ifp, txr, txd); if (__predict_false(error)) { /* txd is freed, but m_head is not */ drbr_putback(ifp, txr->hn_mbuf_br, m_head); txr->hn_oactive = 1; break; } } } #ifdef INVARIANTS else { KASSERT(txr->hn_agg_txd != NULL, ("no aggregating txdesc")); KASSERT(m_head == NULL, ("pending mbuf for aggregating txdesc")); } #endif /* Sent */ drbr_advance(ifp, txr->hn_mbuf_br); } /* Flush pending aggregated transmission. 
*/ if (txr->hn_agg_txd != NULL) hn_flush_txagg(ifp, txr); return (sched); } static int hn_transmit(struct ifnet *ifp, struct mbuf *m) { struct hn_softc *sc = ifp->if_softc; struct hn_tx_ring *txr; int error, idx = 0; #if defined(INET6) || defined(INET) /* * Perform TSO packet header fixup now, since the TSO * packet header should be cache-hot. */ if (m->m_pkthdr.csum_flags & CSUM_TSO) { m = hn_tso_fixup(m); if (__predict_false(m == NULL)) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return EIO; } } #endif /* * Select the TX ring based on flowid. */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { #ifdef RSS uint32_t bid; if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), &bid) == 0) idx = bid % sc->hn_tx_ring_inuse; else #endif idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse; } txr = &sc->hn_tx_ring[idx]; error = drbr_enqueue(ifp, txr->hn_mbuf_br, m); if (error) { if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1); return error; } if (txr->hn_oactive) return 0; if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; sched = hn_xmit(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (!sched) return 0; } do_sched: taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); return 0; } static void hn_tx_ring_qflush(struct hn_tx_ring *txr) { struct mbuf *m; mtx_lock(&txr->hn_tx_lock); while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL) m_freem(m); mtx_unlock(&txr->hn_tx_lock); } static void hn_xmit_qflush(struct ifnet *ifp) { struct hn_softc *sc = ifp->if_softc; int i; for (i = 0; i < sc->hn_tx_ring_inuse; ++i) hn_tx_ring_qflush(&sc->hn_tx_ring[i]); if_qflush(ifp); } static void hn_xmit_txeof(struct hn_tx_ring *txr) { if (txr->hn_sched_tx) goto do_sched; if (mtx_trylock(&txr->hn_tx_lock)) { int sched; txr->hn_oactive = 0; sched = hn_xmit(txr, txr->hn_direct_tx_size); mtx_unlock(&txr->hn_tx_lock); if (sched) { taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } } else { do_sched: /* * Release the oactive earlier, with the hope that * others could catch up. The task will clear the * oactive again with the hn_tx_lock to avoid possible * races. */ txr->hn_oactive = 0; taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); } } static void hn_xmit_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); hn_xmit(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static void hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; mtx_lock(&txr->hn_tx_lock); txr->hn_oactive = 0; hn_xmit(txr, 0); mtx_unlock(&txr->hn_tx_lock); } static int hn_chan_attach(struct hn_softc *sc, struct vmbus_channel *chan) { struct vmbus_chan_br cbr; struct hn_rx_ring *rxr; struct hn_tx_ring *txr = NULL; int idx, error; idx = vmbus_chan_subidx(chan); /* * Link this channel to RX/TX ring. 
*/ KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse, ("invalid channel index %d, should be >= 0 && < %d", idx, sc->hn_rx_ring_inuse)); rxr = &sc->hn_rx_ring[idx]; KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0, ("RX ring %d already attached", idx)); rxr->hn_rx_flags |= HN_RX_FLAG_ATTACHED; + rxr->hn_chan = chan; if (bootverbose) { if_printf(sc->hn_ifp, "link RX ring %d to chan%u\n", idx, vmbus_chan_id(chan)); } if (idx < sc->hn_tx_ring_inuse) { txr = &sc->hn_tx_ring[idx]; KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0, ("TX ring %d already attached", idx)); txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED; txr->hn_chan = chan; if (bootverbose) { if_printf(sc->hn_ifp, "link TX ring %d to chan%u\n", idx, vmbus_chan_id(chan)); } } /* Bind this channel to a proper CPU. */ vmbus_chan_cpu_set(chan, HN_RING_IDX2CPU(sc, idx)); /* * Open this channel. */ cbr.cbr = rxr->hn_br; cbr.cbr_paddr = rxr->hn_br_dma.hv_paddr; cbr.cbr_txsz = HN_TXBR_SIZE; cbr.cbr_rxsz = HN_RXBR_SIZE; error = vmbus_chan_open_br(chan, &cbr, NULL, 0, hn_chan_callback, rxr); if (error) { if (error == EISCONN) { if_printf(sc->hn_ifp, "bufring is connected after " "chan%u open failure\n", vmbus_chan_id(chan)); rxr->hn_rx_flags |= HN_RX_FLAG_BR_REF; } else { if_printf(sc->hn_ifp, "open chan%u failed: %d\n", vmbus_chan_id(chan), error); } } return (error); } static void hn_chan_detach(struct hn_softc *sc, struct vmbus_channel *chan) { struct hn_rx_ring *rxr; int idx, error; idx = vmbus_chan_subidx(chan); /* * Look up the RX/TX ring linked to this channel. */ KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse, ("invalid channel index %d, should be >= 0 && < %d", idx, sc->hn_rx_ring_inuse)); rxr = &sc->hn_rx_ring[idx]; KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED), ("RX ring %d is not attached", idx)); rxr->hn_rx_flags &= ~HN_RX_FLAG_ATTACHED; if (idx < sc->hn_tx_ring_inuse) { struct hn_tx_ring *txr = &sc->hn_tx_ring[idx]; KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED), ("TX ring %d is not attached", idx)); txr->hn_tx_flags &= ~HN_TX_FLAG_ATTACHED; } /* * Close this channel. * * NOTE: * Channel closing does _not_ destroy the target channel. */ error = vmbus_chan_close_direct(chan); if (error == EISCONN) { if_printf(sc->hn_ifp, "chan%u bufring is connected " "after being closed\n", vmbus_chan_id(chan)); rxr->hn_rx_flags |= HN_RX_FLAG_BR_REF; } else if (error) { if_printf(sc->hn_ifp, "chan%u close failed: %d\n", vmbus_chan_id(chan), error); } } static int hn_attach_subchans(struct hn_softc *sc) { struct vmbus_channel **subchans; int subchan_cnt = sc->hn_rx_ring_inuse - 1; int i, error = 0; KASSERT(subchan_cnt > 0, ("no sub-channels")); /* Attach the sub-channels. */ subchans = vmbus_subchan_get(sc->hn_prichan, subchan_cnt); for (i = 0; i < subchan_cnt; ++i) { int error1; error1 = hn_chan_attach(sc, subchans[i]); if (error1) { error = error1; /* Move on; all channels will be detached later. */ } } vmbus_subchan_rel(subchans, subchan_cnt); if (error) { if_printf(sc->hn_ifp, "sub-channels attach failed: %d\n", error); } else { if (bootverbose) { if_printf(sc->hn_ifp, "%d sub-channels attached\n", subchan_cnt); } } return (error); } static void hn_detach_allchans(struct hn_softc *sc) { struct vmbus_channel **subchans; int subchan_cnt = sc->hn_rx_ring_inuse - 1; int i; if (subchan_cnt == 0) goto back; /* Detach the sub-channels. 
*/ subchans = vmbus_subchan_get(sc->hn_prichan, subchan_cnt); for (i = 0; i < subchan_cnt; ++i) hn_chan_detach(sc, subchans[i]); vmbus_subchan_rel(subchans, subchan_cnt); back: /* * Detach the primary channel, _after_ all sub-channels * are detached. */ hn_chan_detach(sc, sc->hn_prichan); /* Wait for sub-channels to be destroyed, if any. */ vmbus_subchan_drain(sc->hn_prichan); #ifdef INVARIANTS for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { KASSERT((sc->hn_rx_ring[i].hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0, ("%dth RX ring is still attached", i)); } for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { KASSERT((sc->hn_tx_ring[i].hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0, ("%dth TX ring is still attached", i)); } #endif } static int hn_synth_alloc_subchans(struct hn_softc *sc, int *nsubch) { struct vmbus_channel **subchans; int nchan, rxr_cnt, error; nchan = *nsubch + 1; if (nchan == 1) { /* * Multiple RX/TX rings are not requested. */ *nsubch = 0; return (0); } /* * Query RSS capabilities, e.g. # of RX rings, and # of indirect * table entries. */ error = hn_rndis_query_rsscaps(sc, &rxr_cnt); if (error) { /* No RSS; this is benign. */ *nsubch = 0; return (0); } if (bootverbose) { if_printf(sc->hn_ifp, "RX rings offered %u, requested %d\n", rxr_cnt, nchan); } if (nchan > rxr_cnt) nchan = rxr_cnt; if (nchan == 1) { if_printf(sc->hn_ifp, "only 1 channel is supported, no vRSS\n"); *nsubch = 0; return (0); } /* * Allocate sub-channels from NVS. */ *nsubch = nchan - 1; error = hn_nvs_alloc_subchans(sc, nsubch); if (error || *nsubch == 0) { /* Failed to allocate sub-channels. */ *nsubch = 0; return (0); } /* * Wait for all sub-channels to become ready before moving on. */ subchans = vmbus_subchan_get(sc->hn_prichan, *nsubch); vmbus_subchan_rel(subchans, *nsubch); return (0); } static bool hn_synth_attachable(const struct hn_softc *sc) { int i; if (sc->hn_flags & HN_FLAG_ERRORS) return (false); for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { const struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; if (rxr->hn_rx_flags & HN_RX_FLAG_BR_REF) return (false); } return (true); } static int hn_synth_attach(struct hn_softc *sc, int mtu) { #define ATTACHED_NVS 0x0002 #define ATTACHED_RNDIS 0x0004 struct ndis_rssprm_toeplitz *rss = &sc->hn_rss; int error, nsubch, nchan, i; uint32_t old_caps, attached = 0; KASSERT((sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) == 0, ("synthetic parts were attached")); if (!hn_synth_attachable(sc)) return (ENXIO); /* Save capabilities for later verification. */ old_caps = sc->hn_caps; sc->hn_caps = 0; /* Clear RSS stuffs. */ sc->hn_rss_ind_size = 0; sc->hn_rss_hash = 0; /* * Attach the primary channel _before_ attaching NVS and RNDIS. */ error = hn_chan_attach(sc, sc->hn_prichan); if (error) goto failed; /* * Attach NVS. */ error = hn_nvs_attach(sc, mtu); if (error) goto failed; attached |= ATTACHED_NVS; /* * Attach RNDIS _after_ NVS is attached. */ error = hn_rndis_attach(sc, mtu); if (error) goto failed; attached |= ATTACHED_RNDIS; /* * Make sure capabilities are not changed. */ if (device_is_attached(sc->hn_dev) && old_caps != sc->hn_caps) { if_printf(sc->hn_ifp, "caps mismatch old 0x%08x, new 0x%08x\n", old_caps, sc->hn_caps); error = ENXIO; goto failed; } /* * Allocate sub-channels for multi-TX/RX rings. * * NOTE: * The # of RX rings that can be used is equivalent to the # of * channels to be requested. */ nsubch = sc->hn_rx_ring_cnt - 1; error = hn_synth_alloc_subchans(sc, &nsubch); if (error) goto failed; /* NOTE: _Full_ synthetic parts detach is required now. 
*/ sc->hn_flags |= HN_FLAG_SYNTH_ATTACHED; /* * Set the # of TX/RX rings that could be used according to * the # of channels that NVS offered. */ nchan = nsubch + 1; hn_set_ring_inuse(sc, nchan); if (nchan == 1) { /* Only the primary channel can be used; done */ goto back; } /* * Attach the sub-channels. * * NOTE: hn_set_ring_inuse() _must_ have been called. */ error = hn_attach_subchans(sc); if (error) goto failed; /* * Configure RSS key and indirect table _after_ all sub-channels * are attached. */ if ((sc->hn_flags & HN_FLAG_HAS_RSSKEY) == 0) { /* * RSS key is not set yet; set it to the default RSS key. */ if (bootverbose) if_printf(sc->hn_ifp, "setup default RSS key\n"); #ifdef RSS rss_getkey(rss->rss_key); #else memcpy(rss->rss_key, hn_rss_key_default, sizeof(rss->rss_key)); #endif sc->hn_flags |= HN_FLAG_HAS_RSSKEY; } if ((sc->hn_flags & HN_FLAG_HAS_RSSIND) == 0) { /* * RSS indirect table is not set yet; set it up in round- * robin fashion. */ if (bootverbose) { if_printf(sc->hn_ifp, "setup default RSS indirect " "table\n"); } for (i = 0; i < NDIS_HASH_INDCNT; ++i) { uint32_t subidx; #ifdef RSS subidx = rss_get_indirection_to_bucket(i); #else subidx = i; #endif rss->rss_ind[i] = subidx % nchan; } sc->hn_flags |= HN_FLAG_HAS_RSSIND; } else { /* * # of usable channels may be changed, so we have to * make sure that all entries in the RSS indirect table * are valid. * * NOTE: hn_set_ring_inuse() _must_ have been called. */ hn_rss_ind_fixup(sc); } error = hn_rndis_conf_rss(sc, NDIS_RSS_FLAG_NONE); if (error) goto failed; back: /* * Fixup transmission aggregation setup. */ hn_set_txagg(sc); return (0); failed: if (sc->hn_flags & HN_FLAG_SYNTH_ATTACHED) { hn_synth_detach(sc); } else { if (attached & ATTACHED_RNDIS) hn_rndis_detach(sc); if (attached & ATTACHED_NVS) hn_nvs_detach(sc); hn_chan_detach(sc, sc->hn_prichan); /* Restore old capabilities. */ sc->hn_caps = old_caps; } return (error); #undef ATTACHED_RNDIS #undef ATTACHED_NVS } /* * NOTE: * The interface must have been suspended through hn_suspend() before * this function gets called. */ static void hn_synth_detach(struct hn_softc *sc) { KASSERT(sc->hn_flags & HN_FLAG_SYNTH_ATTACHED, ("synthetic parts were not attached")); /* Detach the RNDIS first. */ hn_rndis_detach(sc); /* Detach NVS. */ hn_nvs_detach(sc); /* Detach all of the channels. */ hn_detach_allchans(sc); sc->hn_flags &= ~HN_FLAG_SYNTH_ATTACHED; } static void hn_set_ring_inuse(struct hn_softc *sc, int ring_cnt) { KASSERT(ring_cnt > 0 && ring_cnt <= sc->hn_rx_ring_cnt, ("invalid ring count %d", ring_cnt)); if (sc->hn_tx_ring_cnt > ring_cnt) sc->hn_tx_ring_inuse = ring_cnt; else sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt; sc->hn_rx_ring_inuse = ring_cnt; #ifdef RSS if (sc->hn_rx_ring_inuse != rss_getnumbuckets()) { if_printf(sc->hn_ifp, "# of RX rings (%d) does not match " "# of RSS buckets (%d)\n", sc->hn_rx_ring_inuse, rss_getnumbuckets()); } #endif if (bootverbose) { if_printf(sc->hn_ifp, "%d TX ring, %d RX ring\n", sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse); } } static void hn_chan_drain(struct hn_softc *sc, struct vmbus_channel *chan) { /* * NOTE: * The TX bufring will not be drained by the hypervisor * if the primary channel is revoked. */ while (!vmbus_chan_rx_empty(chan) || (!vmbus_chan_is_revoked(sc->hn_prichan) && !vmbus_chan_tx_empty(chan))) pause("waitch", 1); vmbus_chan_intr_drain(chan); } static void hn_suspend_data(struct hn_softc *sc) { struct vmbus_channel **subch = NULL; struct hn_tx_ring *txr; int i, nsubch; HN_LOCK_ASSERT(sc); /* * Suspend TX. 
*/ for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { txr = &sc->hn_tx_ring[i]; mtx_lock(&txr->hn_tx_lock); txr->hn_suspended = 1; mtx_unlock(&txr->hn_tx_lock); /* No one is able to send more packets now. */ /* * Wait for all pending sends to finish. * * NOTE: * We will _not_ receive all pending send-done if the * primary channel is revoked. */ while (hn_tx_ring_pending(txr) && !vmbus_chan_is_revoked(sc->hn_prichan)) pause("hnwtx", 1 /* 1 tick */); } /* * Disable RX by clearing RX filter. */ hn_set_rxfilter(sc, NDIS_PACKET_TYPE_NONE); /* * Give RNDIS enough time to flush all pending data packets. */ pause("waitrx", (200 * hz) / 1000); /* * Drain RX/TX bufrings and interrupts. */ nsubch = sc->hn_rx_ring_inuse - 1; if (nsubch > 0) subch = vmbus_subchan_get(sc->hn_prichan, nsubch); if (subch != NULL) { for (i = 0; i < nsubch; ++i) hn_chan_drain(sc, subch[i]); } hn_chan_drain(sc, sc->hn_prichan); if (subch != NULL) vmbus_subchan_rel(subch, nsubch); /* * Drain any pending TX tasks. * * NOTE: * The above hn_chan_drain() can dispatch TX tasks, so the TX * tasks will have to be drained _after_ the above hn_chan_drain() * calls. */ for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { txr = &sc->hn_tx_ring[i]; taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task); taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task); } } static void hn_suspend_mgmt_taskfunc(void *xsc, int pending __unused) { ((struct hn_softc *)xsc)->hn_mgmt_taskq = NULL; } static void hn_suspend_mgmt(struct hn_softc *sc) { struct task task; HN_LOCK_ASSERT(sc); /* * Make sure that hn_mgmt_taskq0 can no longer be accessed * through hn_mgmt_taskq. */ TASK_INIT(&task, 0, hn_suspend_mgmt_taskfunc, sc); vmbus_chan_run_task(sc->hn_prichan, &task); /* * Make sure that all pending management tasks are completed. */ taskqueue_drain(sc->hn_mgmt_taskq0, &sc->hn_netchg_init); taskqueue_drain_timeout(sc->hn_mgmt_taskq0, &sc->hn_netchg_status); taskqueue_drain_all(sc->hn_mgmt_taskq0); } static void hn_suspend(struct hn_softc *sc) { /* Disable polling. */ hn_polling(sc, 0); - if (sc->hn_ifp->if_drv_flags & IFF_DRV_RUNNING) + if ((sc->hn_ifp->if_drv_flags & IFF_DRV_RUNNING) || + (sc->hn_flags & HN_FLAG_VF)) hn_suspend_data(sc); hn_suspend_mgmt(sc); } static void hn_resume_tx(struct hn_softc *sc, int tx_ring_cnt) { int i; KASSERT(tx_ring_cnt <= sc->hn_tx_ring_cnt, ("invalid TX ring count %d", tx_ring_cnt)); for (i = 0; i < tx_ring_cnt; ++i) { struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; mtx_lock(&txr->hn_tx_lock); txr->hn_suspended = 0; mtx_unlock(&txr->hn_tx_lock); } } static void hn_resume_data(struct hn_softc *sc) { int i; HN_LOCK_ASSERT(sc); /* * Re-enable RX. */ hn_rxfilter_config(sc); /* * Make sure to clear suspend status on "all" TX rings, * since hn_tx_ring_inuse can be changed after * hn_suspend_data(). */ hn_resume_tx(sc, sc->hn_tx_ring_cnt); #ifdef HN_IFSTART_SUPPORT if (!hn_use_if_start) #endif { /* * Flush unused drbrs, since hn_tx_ring_inuse may be * reduced. */ for (i = sc->hn_tx_ring_inuse; i < sc->hn_tx_ring_cnt; ++i) hn_tx_ring_qflush(&sc->hn_tx_ring[i]); } /* * Kick start TX. */ for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; /* * Use txeof task, so that any pending oactive can be * cleared properly. */ taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); } } static void hn_resume_mgmt(struct hn_softc *sc) { sc->hn_mgmt_taskq = sc->hn_mgmt_taskq0; /* * Kick off network change detection, if it was pending. 
* If no network change was pending, start link status * checks, which is more lightweight than network change * detection. */ if (sc->hn_link_flags & HN_LINK_FLAG_NETCHG) hn_change_network(sc); else hn_update_link_status(sc); } static void hn_resume(struct hn_softc *sc) { - if (sc->hn_ifp->if_drv_flags & IFF_DRV_RUNNING) + if ((sc->hn_ifp->if_drv_flags & IFF_DRV_RUNNING) || + (sc->hn_flags & HN_FLAG_VF)) hn_resume_data(sc); - hn_resume_mgmt(sc); + + /* + * When the VF is activated, the synthetic interface is changed + * to DOWN in hn_set_vf(). Here, if the VF is still active, we + * don't call hn_resume_mgmt() until the VF is deactivated in + * hn_set_vf(). + */ + if (!(sc->hn_flags & HN_FLAG_VF)) + hn_resume_mgmt(sc); /* * Re-enable polling if this interface is running and * the polling is requested. */ if ((sc->hn_ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->hn_pollhz > 0) hn_polling(sc, sc->hn_pollhz); } static void hn_rndis_rx_status(struct hn_softc *sc, const void *data, int dlen) { const struct rndis_status_msg *msg; int ofs; if (dlen < sizeof(*msg)) { if_printf(sc->hn_ifp, "invalid RNDIS status\n"); return; } msg = data; switch (msg->rm_status) { case RNDIS_STATUS_MEDIA_CONNECT: case RNDIS_STATUS_MEDIA_DISCONNECT: hn_update_link_status(sc); break; case RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG: /* Not really useful; ignore. */ break; case RNDIS_STATUS_NETWORK_CHANGE: ofs = RNDIS_STBUFOFFSET_ABS(msg->rm_stbufoffset); if (dlen < ofs + msg->rm_stbuflen || msg->rm_stbuflen < sizeof(uint32_t)) { if_printf(sc->hn_ifp, "network changed\n"); } else { uint32_t change; memcpy(&change, ((const uint8_t *)msg) + ofs, sizeof(change)); if_printf(sc->hn_ifp, "network changed, change %u\n", change); } hn_change_network(sc); break; default: if_printf(sc->hn_ifp, "unknown RNDIS status 0x%08x\n", msg->rm_status); break; } } static int hn_rndis_rxinfo(const void *info_data, int info_dlen, struct hn_rxinfo *info) { const struct rndis_pktinfo *pi = info_data; uint32_t mask = 0; while (info_dlen != 0) { const void *data; uint32_t dlen; if (__predict_false(info_dlen < sizeof(*pi))) return (EINVAL); if (__predict_false(info_dlen < pi->rm_size)) return (EINVAL); info_dlen -= pi->rm_size; if (__predict_false(pi->rm_size & RNDIS_PKTINFO_SIZE_ALIGNMASK)) return (EINVAL); if (__predict_false(pi->rm_size < pi->rm_pktinfooffset)) return (EINVAL); dlen = pi->rm_size - pi->rm_pktinfooffset; data = pi->rm_data; switch (pi->rm_type) { case NDIS_PKTINFO_TYPE_VLAN: if (__predict_false(dlen < NDIS_VLAN_INFO_SIZE)) return (EINVAL); info->vlan_info = *((const uint32_t *)data); mask |= HN_RXINFO_VLAN; break; case NDIS_PKTINFO_TYPE_CSUM: if (__predict_false(dlen < NDIS_RXCSUM_INFO_SIZE)) return (EINVAL); info->csum_info = *((const uint32_t *)data); mask |= HN_RXINFO_CSUM; break; case HN_NDIS_PKTINFO_TYPE_HASHVAL: if (__predict_false(dlen < HN_NDIS_HASH_VALUE_SIZE)) return (EINVAL); info->hash_value = *((const uint32_t *)data); mask |= HN_RXINFO_HASHVAL; break; case HN_NDIS_PKTINFO_TYPE_HASHINF: if (__predict_false(dlen < HN_NDIS_HASH_INFO_SIZE)) return (EINVAL); info->hash_info = *((const uint32_t *)data); mask |= HN_RXINFO_HASHINF; break; default: goto next; } if (mask == HN_RXINFO_ALL) { /* All found; done */ break; } next: pi = (const struct rndis_pktinfo *) ((const uint8_t *)pi + pi->rm_size); } /* * Final fixup. * - If there is no hash value, invalidate the hash info. 
*/ if ((mask & HN_RXINFO_HASHVAL) == 0) info->hash_info = HN_NDIS_HASH_INFO_INVALID; return (0); } static __inline bool hn_rndis_check_overlap(int off, int len, int check_off, int check_len) { if (off < check_off) { if (__predict_true(off + len <= check_off)) return (false); } else if (off > check_off) { if (__predict_true(check_off + check_len <= off)) return (false); } return (true); } static void hn_rndis_rx_data(struct hn_rx_ring *rxr, const void *data, int dlen) { const struct rndis_packet_msg *pkt; struct hn_rxinfo info; int data_off, pktinfo_off, data_len, pktinfo_len; /* * Check length. */ if (__predict_false(dlen < sizeof(*pkt))) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg\n"); return; } pkt = data; if (__predict_false(dlen < pkt->rm_len)) { if_printf(rxr->hn_ifp, "truncated RNDIS packet msg, " "dlen %d, msglen %u\n", dlen, pkt->rm_len); return; } if (__predict_false(pkt->rm_len < pkt->rm_datalen + pkt->rm_oobdatalen + pkt->rm_pktinfolen)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msglen, " "msglen %u, data %u, oob %u, pktinfo %u\n", pkt->rm_len, pkt->rm_datalen, pkt->rm_oobdatalen, pkt->rm_pktinfolen); return; } if (__predict_false(pkt->rm_datalen == 0)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, no data\n"); return; } /* * Check offsets. */ #define IS_OFFSET_INVALID(ofs) \ ((ofs) < RNDIS_PACKET_MSG_OFFSET_MIN || \ ((ofs) & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)) /* XXX Hyper-V does not meet data offset alignment requirement */ if (__predict_false(pkt->rm_dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "data offset %u\n", pkt->rm_dataoffset); return; } if (__predict_false(pkt->rm_oobdataoffset > 0 && IS_OFFSET_INVALID(pkt->rm_oobdataoffset))) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "oob offset %u\n", pkt->rm_oobdataoffset); return; } if (__predict_true(pkt->rm_pktinfooffset > 0) && __predict_false(IS_OFFSET_INVALID(pkt->rm_pktinfooffset))) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "pktinfo offset %u\n", pkt->rm_pktinfooffset); return; } #undef IS_OFFSET_INVALID data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->rm_dataoffset); data_len = pkt->rm_datalen; pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->rm_pktinfooffset); pktinfo_len = pkt->rm_pktinfolen; /* * Check OOB coverage. */ if (__predict_false(pkt->rm_oobdatalen != 0)) { int oob_off, oob_len; if_printf(rxr->hn_ifp, "got oobdata\n"); oob_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->rm_oobdataoffset); oob_len = pkt->rm_oobdatalen; if (__predict_false(oob_off + oob_len > pkt->rm_len)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "oob overflow, msglen %u, oob abs %d len %d\n", pkt->rm_len, oob_off, oob_len); return; } /* * Check against data. */ if (hn_rndis_check_overlap(oob_off, oob_len, data_off, data_len)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "oob overlaps data, oob abs %d len %d, " "data abs %d len %d\n", oob_off, oob_len, data_off, data_len); return; } /* * Check against pktinfo. */ if (pktinfo_len != 0 && hn_rndis_check_overlap(oob_off, oob_len, pktinfo_off, pktinfo_len)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "oob overlaps pktinfo, oob abs %d len %d, " "pktinfo abs %d len %d\n", oob_off, oob_len, pktinfo_off, pktinfo_len); return; } } /* * Check per-packet-info coverage and find useful per-packet-info. 
*/ info.vlan_info = HN_NDIS_VLAN_INFO_INVALID; info.csum_info = HN_NDIS_RXCSUM_INFO_INVALID; info.hash_info = HN_NDIS_HASH_INFO_INVALID; if (__predict_true(pktinfo_len != 0)) { bool overlap; int error; if (__predict_false(pktinfo_off + pktinfo_len > pkt->rm_len)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "pktinfo overflow, msglen %u, " "pktinfo abs %d len %d\n", pkt->rm_len, pktinfo_off, pktinfo_len); return; } /* * Check packet info coverage. */ overlap = hn_rndis_check_overlap(pktinfo_off, pktinfo_len, data_off, data_len); if (__predict_false(overlap)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "pktinfo overlap data, pktinfo abs %d len %d, " "data abs %d len %d\n", pktinfo_off, pktinfo_len, data_off, data_len); return; } /* * Find useful per-packet-info. */ error = hn_rndis_rxinfo(((const uint8_t *)pkt) + pktinfo_off, pktinfo_len, &info); if (__predict_false(error)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg " "pktinfo\n"); return; } } if (__predict_false(data_off + data_len > pkt->rm_len)) { if_printf(rxr->hn_ifp, "invalid RNDIS packet msg, " "data overflow, msglen %u, data abs %d len %d\n", pkt->rm_len, data_off, data_len); return; } hn_rxpkt(rxr, ((const uint8_t *)pkt) + data_off, data_len, &info); } static __inline void hn_rndis_rxpkt(struct hn_rx_ring *rxr, const void *data, int dlen) { const struct rndis_msghdr *hdr; if (__predict_false(dlen < sizeof(*hdr))) { if_printf(rxr->hn_ifp, "invalid RNDIS msg\n"); return; } hdr = data; if (__predict_true(hdr->rm_type == REMOTE_NDIS_PACKET_MSG)) { /* Hot data path. */ hn_rndis_rx_data(rxr, data, dlen); /* Done! */ return; } if (hdr->rm_type == REMOTE_NDIS_INDICATE_STATUS_MSG) hn_rndis_rx_status(rxr->hn_ifp->if_softc, data, dlen); else hn_rndis_rx_ctrl(rxr->hn_ifp->if_softc, data, dlen); } static void hn_nvs_handle_notify(struct hn_softc *sc, const struct vmbus_chanpkt_hdr *pkt) { const struct hn_nvs_hdr *hdr; if (VMBUS_CHANPKT_DATALEN(pkt) < sizeof(*hdr)) { if_printf(sc->hn_ifp, "invalid nvs notify\n"); return; } hdr = VMBUS_CHANPKT_CONST_DATA(pkt); if (hdr->nvs_type == HN_NVS_TYPE_TXTBL_NOTE) { /* Useless; ignore */ return; } if_printf(sc->hn_ifp, "got notify, nvs type %u\n", hdr->nvs_type); } static void hn_nvs_handle_comp(struct hn_softc *sc, struct vmbus_channel *chan, const struct vmbus_chanpkt_hdr *pkt) { struct hn_nvs_sendctx *sndc; sndc = (struct hn_nvs_sendctx *)(uintptr_t)pkt->cph_xactid; sndc->hn_cb(sndc, sc, chan, VMBUS_CHANPKT_CONST_DATA(pkt), VMBUS_CHANPKT_DATALEN(pkt)); /* * NOTE: * 'sndc' CAN NOT be accessed anymore, since it can be freed by * its callback. */ } static void hn_nvs_handle_rxbuf(struct hn_rx_ring *rxr, struct vmbus_channel *chan, const struct vmbus_chanpkt_hdr *pkthdr) { const struct vmbus_chanpkt_rxbuf *pkt; const struct hn_nvs_hdr *nvs_hdr; int count, i, hlen; if (__predict_false(VMBUS_CHANPKT_DATALEN(pkthdr) < sizeof(*nvs_hdr))) { if_printf(rxr->hn_ifp, "invalid nvs RNDIS\n"); return; } nvs_hdr = VMBUS_CHANPKT_CONST_DATA(pkthdr); /* Make sure that this is a RNDIS message. 
*/ if (__predict_false(nvs_hdr->nvs_type != HN_NVS_TYPE_RNDIS)) { if_printf(rxr->hn_ifp, "nvs type %u, not RNDIS\n", nvs_hdr->nvs_type); return; } hlen = VMBUS_CHANPKT_GETLEN(pkthdr->cph_hlen); if (__predict_false(hlen < sizeof(*pkt))) { if_printf(rxr->hn_ifp, "invalid rxbuf chanpkt\n"); return; } pkt = (const struct vmbus_chanpkt_rxbuf *)pkthdr; if (__predict_false(pkt->cp_rxbuf_id != HN_NVS_RXBUF_SIG)) { if_printf(rxr->hn_ifp, "invalid rxbuf_id 0x%08x\n", pkt->cp_rxbuf_id); return; } count = pkt->cp_rxbuf_cnt; if (__predict_false(hlen < __offsetof(struct vmbus_chanpkt_rxbuf, cp_rxbuf[count]))) { if_printf(rxr->hn_ifp, "invalid rxbuf_cnt %d\n", count); return; } /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */ for (i = 0; i < count; ++i) { int ofs, len; ofs = pkt->cp_rxbuf[i].rb_ofs; len = pkt->cp_rxbuf[i].rb_len; if (__predict_false(ofs + len > HN_RXBUF_SIZE)) { if_printf(rxr->hn_ifp, "%dth RNDIS msg overflow rxbuf, " "ofs %d, len %d\n", i, ofs, len); continue; } hn_rndis_rxpkt(rxr, rxr->hn_rxbuf + ofs, len); } /* * Ack the consumed RXBUF associated w/ this channel packet, * so that this RXBUF can be recycled by the hypervisor. */ hn_nvs_ack_rxbuf(rxr, chan, pkt->cp_hdr.cph_xactid); } static void hn_nvs_ack_rxbuf(struct hn_rx_ring *rxr, struct vmbus_channel *chan, uint64_t tid) { struct hn_nvs_rndis_ack ack; int retries, error; ack.nvs_type = HN_NVS_TYPE_RNDIS_ACK; ack.nvs_status = HN_NVS_STATUS_OK; retries = 0; again: error = vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP, VMBUS_CHANPKT_FLAG_NONE, &ack, sizeof(ack), tid); if (__predict_false(error == EAGAIN)) { /* * NOTE: * This should _not_ happen in real world, since the * consumption of the TX bufring from the TX path is * controlled. */ if (rxr->hn_ack_failed == 0) if_printf(rxr->hn_ifp, "RXBUF ack retry\n"); rxr->hn_ack_failed++; retries++; if (retries < 10) { DELAY(100); goto again; } /* RXBUF leaks! */ if_printf(rxr->hn_ifp, "RXBUF ack failed\n"); } } static void hn_chan_callback(struct vmbus_channel *chan, void *xrxr) { struct hn_rx_ring *rxr = xrxr; struct hn_softc *sc = rxr->hn_ifp->if_softc; for (;;) { struct vmbus_chanpkt_hdr *pkt = rxr->hn_pktbuf; int error, pktlen; pktlen = rxr->hn_pktbuf_len; error = vmbus_chan_recv_pkt(chan, pkt, &pktlen); if (__predict_false(error == ENOBUFS)) { void *nbuf; int nlen; /* * Expand channel packet buffer. * * XXX * Use M_WAITOK here, since allocation failure * is fatal. */ nlen = rxr->hn_pktbuf_len * 2; while (nlen < pktlen) nlen *= 2; nbuf = malloc(nlen, M_DEVBUF, M_WAITOK); if_printf(rxr->hn_ifp, "expand pktbuf %d -> %d\n", rxr->hn_pktbuf_len, nlen); free(rxr->hn_pktbuf, M_DEVBUF); rxr->hn_pktbuf = nbuf; rxr->hn_pktbuf_len = nlen; /* Retry! */ continue; } else if (__predict_false(error == EAGAIN)) { /* No more channel packets; done! */ break; } KASSERT(!error, ("vmbus_chan_recv_pkt failed: %d", error)); switch (pkt->cph_type) { case VMBUS_CHANPKT_TYPE_COMP: hn_nvs_handle_comp(sc, chan, pkt); break; case VMBUS_CHANPKT_TYPE_RXBUF: hn_nvs_handle_rxbuf(rxr, chan, pkt); break; case VMBUS_CHANPKT_TYPE_INBAND: hn_nvs_handle_notify(sc, pkt); break; default: if_printf(rxr->hn_ifp, "unknown chan pkt %u\n", pkt->cph_type); break; } } hn_chan_rollup(rxr, rxr->hn_txr); } static void hn_tx_taskq_create(void *arg __unused) { int i; /* * Fix the # of TX taskqueues. */ if (hn_tx_taskq_cnt <= 0) hn_tx_taskq_cnt = 1; else if (hn_tx_taskq_cnt > mp_ncpus) hn_tx_taskq_cnt = mp_ncpus; /* * Fix the TX taskqueue mode. 
*/ switch (hn_tx_taskq_mode) { case HN_TX_TASKQ_M_INDEP: case HN_TX_TASKQ_M_GLOBAL: case HN_TX_TASKQ_M_EVTTQ: break; default: hn_tx_taskq_mode = HN_TX_TASKQ_M_INDEP; break; } if (vm_guest != VM_GUEST_HV) return; if (hn_tx_taskq_mode != HN_TX_TASKQ_M_GLOBAL) return; hn_tx_taskque = malloc(hn_tx_taskq_cnt * sizeof(struct taskqueue *), M_DEVBUF, M_WAITOK); for (i = 0; i < hn_tx_taskq_cnt; ++i) { hn_tx_taskque[i] = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &hn_tx_taskque[i]); taskqueue_start_threads(&hn_tx_taskque[i], 1, PI_NET, "hn tx%d", i); } } SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_SECOND, hn_tx_taskq_create, NULL); static void hn_tx_taskq_destroy(void *arg __unused) { if (hn_tx_taskque != NULL) { int i; for (i = 0; i < hn_tx_taskq_cnt; ++i) taskqueue_free(hn_tx_taskque[i]); free(hn_tx_taskque, M_DEVBUF); } } SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_SECOND, hn_tx_taskq_destroy, NULL); Index: projects/clang400-import/sys/dev/hyperv/netvsc/if_hnreg.h =================================================================== --- projects/clang400-import/sys/dev/hyperv/netvsc/if_hnreg.h (revision 312719) +++ projects/clang400-import/sys/dev/hyperv/netvsc/if_hnreg.h (revision 312720) @@ -1,245 +1,256 @@ /*- * Copyright (c) 2016 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _IF_HNREG_H_ #define _IF_HNREG_H_ #include #include /* * NDIS protocol version numbers */ #define HN_NDIS_VERSION_6_1 0x00060001 #define HN_NDIS_VERSION_6_20 0x00060014 #define HN_NDIS_VERSION_6_30 0x0006001e #define HN_NDIS_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16) #define HN_NDIS_VERSION_MINOR(ver) ((ver) & 0xffff) /* * NVS versions. */ #define HN_NVS_VERSION_1 0x00002 #define HN_NVS_VERSION_2 0x30002 #define HN_NVS_VERSION_4 0x40000 #define HN_NVS_VERSION_5 0x50000 #define HN_NVS_RXBUF_SIG 0xcafe #define HN_NVS_CHIM_SIG 0xface #define HN_NVS_CHIM_IDX_INVALID 0xffffffff #define HN_NVS_RNDIS_MTYPE_DATA 0 #define HN_NVS_RNDIS_MTYPE_CTRL 1 /* * NVS message transaction status codes. */ #define HN_NVS_STATUS_OK 1 #define HN_NVS_STATUS_FAILED 2 /* * NVS request/response message types.
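* Note that HN_NVS_TYPE_SUBCH_RESP shares the value 133 with HN_NVS_TYPE_SUBCH_REQ, and that types 128 and 134 are unsolicited notifications, not request/response pairs.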
*/ #define HN_NVS_TYPE_INIT 1 #define HN_NVS_TYPE_INIT_RESP 2 #define HN_NVS_TYPE_NDIS_INIT 100 #define HN_NVS_TYPE_RXBUF_CONN 101 #define HN_NVS_TYPE_RXBUF_CONNRESP 102 #define HN_NVS_TYPE_RXBUF_DISCONN 103 #define HN_NVS_TYPE_CHIM_CONN 104 #define HN_NVS_TYPE_CHIM_CONNRESP 105 #define HN_NVS_TYPE_CHIM_DISCONN 106 #define HN_NVS_TYPE_RNDIS 107 #define HN_NVS_TYPE_RNDIS_ACK 108 #define HN_NVS_TYPE_NDIS_CONF 125 #define HN_NVS_TYPE_VFASSOC_NOTE 128 /* notification */ #define HN_NVS_TYPE_SET_DATAPATH 129 #define HN_NVS_TYPE_SUBCH_REQ 133 #define HN_NVS_TYPE_SUBCH_RESP 133 /* same as SUBCH_REQ */ #define HN_NVS_TYPE_TXTBL_NOTE 134 /* notification */ /* * Any size less than this one will _not_ work, e.g. hn_nvs_init * only has 12B of valid data; if only those 12B were sent, the * hypervisor would never reply. */ #define HN_NVS_REQSIZE_MIN 32 /* NVS message common header */ struct hn_nvs_hdr { uint32_t nvs_type; } __packed; struct hn_nvs_init { uint32_t nvs_type; /* HN_NVS_TYPE_INIT */ uint32_t nvs_ver_min; uint32_t nvs_ver_max; uint8_t nvs_rsvd[20]; } __packed; CTASSERT(sizeof(struct hn_nvs_init) >= HN_NVS_REQSIZE_MIN); struct hn_nvs_init_resp { uint32_t nvs_type; /* HN_NVS_TYPE_INIT_RESP */ uint32_t nvs_ver; /* deprecated */ uint32_t nvs_rsvd; uint32_t nvs_status; /* HN_NVS_STATUS_ */ } __packed; /* No response */ struct hn_nvs_ndis_conf { uint32_t nvs_type; /* HN_NVS_TYPE_NDIS_CONF */ uint32_t nvs_mtu; uint32_t nvs_rsvd; uint64_t nvs_caps; /* HN_NVS_NDIS_CONF_ */ uint8_t nvs_rsvd1[12]; } __packed; CTASSERT(sizeof(struct hn_nvs_ndis_conf) >= HN_NVS_REQSIZE_MIN); #define HN_NVS_NDIS_CONF_SRIOV 0x0004 #define HN_NVS_NDIS_CONF_VLAN 0x0008 /* No response */ struct hn_nvs_ndis_init { uint32_t nvs_type; /* HN_NVS_TYPE_NDIS_INIT */ uint32_t nvs_ndis_major; /* NDIS_VERSION_MAJOR_ */ uint32_t nvs_ndis_minor; /* NDIS_VERSION_MINOR_ */ uint8_t nvs_rsvd[20]; } __packed; CTASSERT(sizeof(struct hn_nvs_ndis_init) >= HN_NVS_REQSIZE_MIN); +#define HN_NVS_DATAPATH_SYNTHETIC 0 +#define HN_NVS_DATAPATH_VF 1 + +/* No response */ +struct hn_nvs_datapath { + uint32_t nvs_type; /* HN_NVS_TYPE_SET_DATAPATH */ + uint32_t nvs_active_path;/* HN_NVS_DATAPATH_* */ + uint32_t nvs_rsvd[6]; +} __packed; +CTASSERT(sizeof(struct hn_nvs_datapath) >= HN_NVS_REQSIZE_MIN); + struct hn_nvs_rxbuf_conn { uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_CONN */ uint32_t nvs_gpadl; /* RXBUF vmbus GPADL */ uint16_t nvs_sig; /* HN_NVS_RXBUF_SIG */ uint8_t nvs_rsvd[22]; } __packed; CTASSERT(sizeof(struct hn_nvs_rxbuf_conn) >= HN_NVS_REQSIZE_MIN); struct hn_nvs_rxbuf_sect { uint32_t nvs_start; uint32_t nvs_slotsz; uint32_t nvs_slotcnt; uint32_t nvs_end; } __packed; struct hn_nvs_rxbuf_connresp { uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_CONNRESP */ uint32_t nvs_status; /* HN_NVS_STATUS_ */ uint32_t nvs_nsect; /* # of elem in nvs_sect */ struct hn_nvs_rxbuf_sect nvs_sect[]; } __packed; /* No response */ struct hn_nvs_rxbuf_disconn { uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_DISCONN */ uint16_t nvs_sig; /* HN_NVS_RXBUF_SIG */ uint8_t nvs_rsvd[26]; } __packed; CTASSERT(sizeof(struct hn_nvs_rxbuf_disconn) >= HN_NVS_REQSIZE_MIN); struct hn_nvs_chim_conn { uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_CONN */ uint32_t nvs_gpadl; /* chimney buf vmbus GPADL */ uint16_t nvs_sig; /* HN_NVS_CHIM_SIG */ uint8_t nvs_rsvd[22]; } __packed; CTASSERT(sizeof(struct hn_nvs_chim_conn) >= HN_NVS_REQSIZE_MIN); struct hn_nvs_chim_connresp { uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_CONNRESP */ uint32_t nvs_status; /* HN_NVS_STATUS_ */ uint32_t nvs_sectsz; /* section size */ }
__packed; /* No response */ struct hn_nvs_chim_disconn { uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_DISCONN */ uint16_t nvs_sig; /* HN_NVS_CHIM_SIG */ uint8_t nvs_rsvd[26]; } __packed; CTASSERT(sizeof(struct hn_nvs_chim_disconn) >= HN_NVS_REQSIZE_MIN); #define HN_NVS_SUBCH_OP_ALLOC 1 struct hn_nvs_subch_req { uint32_t nvs_type; /* HN_NVS_TYPE_SUBCH_REQ */ uint32_t nvs_op; /* HN_NVS_SUBCH_OP_ */ uint32_t nvs_nsubch; uint8_t nvs_rsvd[20]; } __packed; CTASSERT(sizeof(struct hn_nvs_subch_req) >= HN_NVS_REQSIZE_MIN); struct hn_nvs_subch_resp { uint32_t nvs_type; /* HN_NVS_TYPE_SUBCH_RESP */ uint32_t nvs_status; /* HN_NVS_STATUS_ */ uint32_t nvs_nsubch; } __packed; struct hn_nvs_rndis { uint32_t nvs_type; /* HN_NVS_TYPE_RNDIS */ uint32_t nvs_rndis_mtype;/* HN_NVS_RNDIS_MTYPE_ */ /* * Chimney sending buffer index and size. * * NOTE: * If nvs_chim_idx is set to HN_NVS_CHIM_IDX_INVALID * and nvs_chim_sz is set to 0, then chimney sending * buffer is _not_ used by this RNDIS message. */ uint32_t nvs_chim_idx; uint32_t nvs_chim_sz; uint8_t nvs_rsvd[16]; } __packed; CTASSERT(sizeof(struct hn_nvs_rndis) >= HN_NVS_REQSIZE_MIN); struct hn_nvs_rndis_ack { uint32_t nvs_type; /* HN_NVS_TYPE_RNDIS_ACK */ uint32_t nvs_status; /* HN_NVS_STATUS_ */ uint8_t nvs_rsvd[24]; } __packed; CTASSERT(sizeof(struct hn_nvs_rndis_ack) >= HN_NVS_REQSIZE_MIN); /* * RNDIS extension */ /* Per-packet hash info */ #define HN_NDIS_HASH_INFO_SIZE sizeof(uint32_t) #define HN_NDIS_PKTINFO_TYPE_HASHINF NDIS_PKTINFO_TYPE_ORIG_NBLIST /* NDIS_HASH_ */ /* Per-packet hash value */ #define HN_NDIS_HASH_VALUE_SIZE sizeof(uint32_t) #define HN_NDIS_PKTINFO_TYPE_HASHVAL NDIS_PKTINFO_TYPE_PKT_CANCELID /* Per-packet-info size */ #define HN_RNDIS_PKTINFO_SIZE(dlen) \ __offsetof(struct rndis_pktinfo, rm_data[dlen]) #endif /* !_IF_HNREG_H_ */ Index: projects/clang400-import/sys/dev/hyperv/netvsc/if_hnvar.h =================================================================== --- projects/clang400-import/sys/dev/hyperv/netvsc/if_hnvar.h (revision 312719) +++ projects/clang400-import/sys/dev/hyperv/netvsc/if_hnvar.h (revision 312720) @@ -1,280 +1,287 @@ /*- * Copyright (c) 2016 Microsoft Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _IF_HNVAR_H_ #define _IF_HNVAR_H_ #define HN_USE_TXDESC_BUFRING #define HN_CHIM_SIZE (15 * 1024 * 1024) #define HN_RXBUF_SIZE (16 * 1024 * 1024) #define HN_RXBUF_SIZE_COMPAT (15 * 1024 * 1024) /* Claimed to be 12232B */ #define HN_MTU_MAX (9 * 1024) #define HN_TXBR_SIZE (128 * PAGE_SIZE) #define HN_RXBR_SIZE (128 * PAGE_SIZE) #define HN_XACT_REQ_PGCNT 2 #define HN_XACT_RESP_PGCNT 2 #define HN_XACT_REQ_SIZE (HN_XACT_REQ_PGCNT * PAGE_SIZE) #define HN_XACT_RESP_SIZE (HN_XACT_RESP_PGCNT * PAGE_SIZE) #define HN_GPACNT_MAX 32 struct hn_txdesc; #ifndef HN_USE_TXDESC_BUFRING SLIST_HEAD(hn_txdesc_list, hn_txdesc); #else struct buf_ring; #endif struct hn_tx_ring; struct hn_rx_ring { struct ifnet *hn_ifp; + struct ifnet *hn_vf; /* SR-IOV VF */ struct hn_tx_ring *hn_txr; void *hn_pktbuf; int hn_pktbuf_len; uint8_t *hn_rxbuf; /* shadow sc->hn_rxbuf */ int hn_rx_idx; /* Trust csum verification on host side */ int hn_trust_hcsum; /* HN_TRUST_HCSUM_ */ struct lro_ctrl hn_lro; u_long hn_csum_ip; u_long hn_csum_tcp; u_long hn_csum_udp; u_long hn_csum_trusted; u_long hn_lro_tried; u_long hn_small_pkts; u_long hn_pkts; u_long hn_rss_pkts; u_long hn_ack_failed; /* Rarely used stuff */ struct sysctl_oid *hn_rx_sysctl_tree; int hn_rx_flags; void *hn_br; /* TX/RX bufring */ struct hyperv_dma hn_br_dma; + + struct vmbus_channel *hn_chan; } __aligned(CACHE_LINE_SIZE); #define HN_TRUST_HCSUM_IP 0x0001 #define HN_TRUST_HCSUM_TCP 0x0002 #define HN_TRUST_HCSUM_UDP 0x0004 #define HN_RX_FLAG_ATTACHED 0x0001 #define HN_RX_FLAG_BR_REF 0x0002 struct hn_tx_ring { #ifndef HN_USE_TXDESC_BUFRING struct mtx hn_txlist_spin; struct hn_txdesc_list hn_txlist; #else struct buf_ring *hn_txdesc_br; #endif int hn_txdesc_cnt; int hn_txdesc_avail; u_short hn_has_txeof; u_short hn_txdone_cnt; int hn_sched_tx; void (*hn_txeof)(struct hn_tx_ring *); struct taskqueue *hn_tx_taskq; struct task hn_tx_task; struct task hn_txeof_task; struct buf_ring *hn_mbuf_br; int hn_oactive; int hn_tx_idx; int hn_tx_flags; struct mtx hn_tx_lock; struct hn_softc *hn_sc; struct vmbus_channel *hn_chan; int hn_direct_tx_size; int hn_chim_size; bus_dma_tag_t hn_tx_data_dtag; uint64_t hn_csum_assist; /* Applied packet transmission aggregation limits. */ int hn_agg_szmax; short hn_agg_pktmax; short hn_agg_align; /* Packet transmission aggregation states. */ struct hn_txdesc *hn_agg_txd; int hn_agg_szleft; short hn_agg_pktleft; struct rndis_packet_msg *hn_agg_prevpkt; /* Temporary stats for each send.
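* (running size/packet/multicast totals for the send currently being assembled)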
*/ int hn_stat_size; short hn_stat_pkts; short hn_stat_mcasts; int (*hn_sendpkt)(struct hn_tx_ring *, struct hn_txdesc *); int hn_suspended; int hn_gpa_cnt; struct vmbus_gpa hn_gpa[HN_GPACNT_MAX]; u_long hn_no_txdescs; u_long hn_send_failed; u_long hn_txdma_failed; u_long hn_tx_collapsed; u_long hn_tx_chimney_tried; u_long hn_tx_chimney; u_long hn_pkts; u_long hn_sends; u_long hn_flush_failed; /* Rarely used stuff */ struct hn_txdesc *hn_txdesc; bus_dma_tag_t hn_tx_rndis_dtag; struct sysctl_oid *hn_tx_sysctl_tree; } __aligned(CACHE_LINE_SIZE); #define HN_TX_FLAG_ATTACHED 0x0001 #define HN_TX_FLAG_HASHVAL 0x0002 /* support HASHVAL pktinfo */ /* * Device-specific softc structure */ struct hn_softc { struct ifnet *hn_ifp; struct ifmedia hn_media; device_t hn_dev; int hn_if_flags; struct sx hn_lock; struct vmbus_channel *hn_prichan; int hn_rx_ring_cnt; int hn_rx_ring_inuse; struct hn_rx_ring *hn_rx_ring; int hn_tx_ring_cnt; int hn_tx_ring_inuse; struct hn_tx_ring *hn_tx_ring; uint8_t *hn_chim; u_long *hn_chim_bmap; int hn_chim_bmap_cnt; int hn_chim_cnt; int hn_chim_szmax; int hn_cpu; struct taskqueue **hn_tx_taskqs; struct sysctl_oid *hn_tx_sysctl_tree; struct sysctl_oid *hn_rx_sysctl_tree; struct vmbus_xact_ctx *hn_xact; uint32_t hn_nvs_ver; uint32_t hn_rx_filter; /* Packet transmission aggregation user settings. */ int hn_agg_size; int hn_agg_pkts; struct taskqueue *hn_mgmt_taskq; struct taskqueue *hn_mgmt_taskq0; struct task hn_link_task; struct task hn_netchg_init; struct timeout_task hn_netchg_status; uint32_t hn_link_flags; /* HN_LINK_FLAG_ */ uint32_t hn_caps; /* HN_CAP_ */ uint32_t hn_flags; /* HN_FLAG_ */ u_int hn_pollhz; void *hn_rxbuf; uint32_t hn_rxbuf_gpadl; struct hyperv_dma hn_rxbuf_dma; uint32_t hn_chim_gpadl; struct hyperv_dma hn_chim_dma; uint32_t hn_rndis_rid; uint32_t hn_ndis_ver; int hn_ndis_tso_szmax; int hn_ndis_tso_sgmin; uint32_t hn_rndis_agg_size; uint32_t hn_rndis_agg_pkts; uint32_t hn_rndis_agg_align; int hn_rss_ind_size; uint32_t hn_rss_hash; /* NDIS_HASH_ */ struct ndis_rssprm_toeplitz hn_rss; + + eventhandler_tag hn_ifaddr_evthand; + eventhandler_tag hn_ifnet_evthand; }; #define HN_FLAG_RXBUF_CONNECTED 0x0001 #define HN_FLAG_CHIM_CONNECTED 0x0002 #define HN_FLAG_HAS_RSSKEY 0x0004 #define HN_FLAG_HAS_RSSIND 0x0008 #define HN_FLAG_SYNTH_ATTACHED 0x0010 #define HN_FLAG_NO_SLEEPING 0x0020 #define HN_FLAG_RXBUF_REF 0x0040 #define HN_FLAG_CHIM_REF 0x0080 +#define HN_FLAG_VF 0x0100 #define HN_FLAG_ERRORS (HN_FLAG_RXBUF_REF | HN_FLAG_CHIM_REF) #define HN_NO_SLEEPING(sc) \ do { \ (sc)->hn_flags |= HN_FLAG_NO_SLEEPING; \ } while (0) #define HN_SLEEPING_OK(sc) \ do { \ (sc)->hn_flags &= ~HN_FLAG_NO_SLEEPING; \ } while (0) #define HN_CAN_SLEEP(sc) \ (((sc)->hn_flags & HN_FLAG_NO_SLEEPING) == 0) #define HN_CAP_VLAN 0x0001 #define HN_CAP_MTU 0x0002 #define HN_CAP_IPCS 0x0004 #define HN_CAP_TCP4CS 0x0008 #define HN_CAP_TCP6CS 0x0010 #define HN_CAP_UDP4CS 0x0020 #define HN_CAP_UDP6CS 0x0040 #define HN_CAP_TSO4 0x0080 #define HN_CAP_TSO6 0x0100 #define HN_CAP_HASHVAL 0x0200 /* Capability description for use with printf(9) %b identifier.
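* For example, with hn_caps == (HN_CAP_VLAN | HN_CAP_MTU | HN_CAP_IPCS), printf("%b", sc->hn_caps, HN_CAP_BITS) prints 7<VLAN,MTU,IPCS>; the leading \020 selects base-16 display of the raw value.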
*/ #define HN_CAP_BITS \ "\020\1VLAN\2MTU\3IPCS\4TCP4CS\5TCP6CS" \ "\6UDP4CS\7UDP6CS\10TSO4\11TSO6\12HASHVAL" #define HN_LINK_FLAG_LINKUP 0x0001 #define HN_LINK_FLAG_NETCHG 0x0002 #endif /* !_IF_HNVAR_H_ */ Index: projects/clang400-import/sys/dev/mrsas/mrsas_linux.c =================================================================== --- projects/clang400-import/sys/dev/mrsas/mrsas_linux.c (revision 312719) +++ projects/clang400-import/sys/dev/mrsas/mrsas_linux.c (revision 312720) @@ -1,140 +1,140 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Kashyap Desai, * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Kashyap Desai, * Sibananda Sahu Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. 
* * Send feedback to: Mail to: AVAGO TECHNOLOGIES, 1621 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD * */ #include __FBSDID("$FreeBSD$"); #include #include #if (__FreeBSD_version >= 1001511) #include #elif (__FreeBSD_version > 900000) -#include +#include #endif #include #include #include #include #include #include #if defined(__amd64__) /* Assume amd64 wants 32 bit Linux */ #include #include #else #include #include #endif #include #include #include #include /* There are multiple ioctl number ranges that need to be handled */ #define MRSAS_LINUX_IOCTL_MIN 0x4d00 #define MRSAS_LINUX_IOCTL_MAX 0x4d01 static linux_ioctl_function_t mrsas_linux_ioctl; static struct linux_ioctl_handler mrsas_linux_handler = {mrsas_linux_ioctl, MRSAS_LINUX_IOCTL_MIN, MRSAS_LINUX_IOCTL_MAX}; SYSINIT(mrsas_register, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_register_handler, &mrsas_linux_handler); SYSUNINIT(mrsas_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_unregister_handler, &mrsas_linux_handler); static struct linux_device_handler mrsas_device_handler = {"mrsas", "megaraid_sas", "mrsas0", "megaraid_sas_ioctl_node", -1, 0, 1}; SYSINIT(mrsas_register2, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_device_register_handler, &mrsas_device_handler); SYSUNINIT(mrsas_unregister2, SI_SUB_KLD, SI_ORDER_MIDDLE, linux_device_unregister_handler, &mrsas_device_handler); static int mrsas_linux_modevent(module_t mod __unused, int cmd __unused, void *data __unused) { return (0); } /* * mrsas_linux_ioctl: linux emulator IOCtl commands entry point. * * This function is the entry point for IOCtls from linux binaries. * It calls the mrsas_ioctl function for processing * depending on the IOCTL command received. */ static int mrsas_linux_ioctl(struct thread *p, struct linux_ioctl_args *args) { #if (__FreeBSD_version >= 1000000) cap_rights_t rights; #endif struct file *fp; int error; u_long cmd = args->cmd; if (cmd != MRSAS_LINUX_CMD32) { error = ENOTSUP; goto END; } #if (__FreeBSD_version >= 1000000) error = fget(p, args->fd, cap_rights_init(&rights, CAP_IOCTL), &fp); #elif (__FreeBSD_version <= 900000) error = fget(p, args->fd, &fp); #else /* For FreeBSD version greater than * 9.0.0 but less than 10.0.0 */ error = fget(p, args->fd, CAP_IOCTL, &fp); #endif if (error != 0) goto END; error = fo_ioctl(fp, cmd, (caddr_t)args->arg, p->td_ucred, p); fdrop(fp, p); END: return (error); } DEV_MODULE(mrsas_linux, mrsas_linux_modevent, NULL); MODULE_DEPEND(mrsas, linux, 1, 1, 1); Index: projects/clang400-import/sys/dev/rtwn/if_rtwn.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/if_rtwn.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/if_rtwn.c (revision 312720) @@ -1,2020 +1,1969 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2014 Kevin Lo * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for Realtek RTL8188CE-VAU/RTL8188CUS/RTL8188EU/RTL8188RU/RTL8192CU/RTL8812AU/RTL8821AU. */ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void rtwn_radiotap_attach(struct rtwn_softc *); static void rtwn_vap_decrement_counters(struct rtwn_softc *, enum ieee80211_opmode, int); static void rtwn_set_ic_opmode(struct rtwn_softc *); static struct ieee80211vap *rtwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rtwn_vap_delete(struct ieee80211vap *); static int rtwn_read_chipid(struct rtwn_softc *); static int rtwn_ioctl_reset(struct ieee80211vap *, u_long); #ifndef RTWN_WITHOUT_UCODE static void rtwn_set_media_status(struct rtwn_softc *, union sec_param *); static int rtwn_tx_fwpkt_check(struct rtwn_softc *, struct ieee80211vap *); static int rtwn_construct_nulldata(struct rtwn_softc *, struct ieee80211vap *, uint8_t *, int); static int rtwn_push_nulldata(struct rtwn_softc *, struct ieee80211vap *); static void rtwn_pwrmode_init(void *); static void rtwn_set_pwrmode_cb(struct rtwn_softc *, union sec_param *); #endif static void rtwn_tsf_sync_adhoc(void *); static void rtwn_tsf_sync_adhoc_task(void *, int); static void rtwn_tsf_sync_enable(struct rtwn_softc *, struct ieee80211vap *); static void rtwn_set_ack_preamble(struct rtwn_softc *); static void rtwn_set_mode(struct rtwn_softc *, uint8_t, int); static int rtwn_monitor_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int rtwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void rtwn_calc_basicrates(struct rtwn_softc *); static int rtwn_run(struct rtwn_softc *, struct ieee80211vap *); #ifndef D4054 static void rtwn_watchdog(void *); #endif static void rtwn_parent(struct ieee80211com *); -static int rtwn_llt_write(struct rtwn_softc *, uint32_t, - uint32_t); -static int rtwn_llt_init(struct rtwn_softc *); static int rtwn_dma_init(struct rtwn_softc *); static int rtwn_mac_init(struct rtwn_softc *); static void rtwn_mrr_init(struct rtwn_softc *); static void rtwn_scan_start(struct ieee80211com *); static void rtwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void rtwn_scan_end(struct ieee80211com *); static void rtwn_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void rtwn_update_chw(struct ieee80211com *); static void rtwn_set_channel(struct ieee80211com *); static int rtwn_wme_update(struct ieee80211com *); static void rtwn_update_slot(struct ieee80211com *); static void rtwn_update_slot_cb(struct rtwn_softc *, union sec_param *); static void rtwn_update_aifs(struct rtwn_softc *, uint8_t); static void rtwn_update_promisc(struct ieee80211com *); static void 
rtwn_update_mcast(struct ieee80211com *); static int rtwn_set_bssid(struct rtwn_softc *, const uint8_t *, int); static int rtwn_set_macaddr(struct rtwn_softc *, const uint8_t *, int); static struct ieee80211_node *rtwn_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); static void rtwn_newassoc(struct ieee80211_node *, int); static void rtwn_node_free(struct ieee80211_node *); static void rtwn_init_beacon_reg(struct rtwn_softc *); static int rtwn_init(struct rtwn_softc *); static void rtwn_stop(struct rtwn_softc *); MALLOC_DEFINE(M_RTWN_PRIV, "rtwn_priv", "rtwn driver private state"); static const uint8_t rtwn_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint16_t wme2reg[] = { R92C_EDCA_BE_PARAM, R92C_EDCA_BK_PARAM, R92C_EDCA_VI_PARAM, R92C_EDCA_VO_PARAM }; int rtwn_attach(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; int error; sc->cur_bcnq_id = RTWN_VAP_ID_INVALID; RTWN_NT_LOCK_INIT(sc); rtwn_cmdq_init(sc); #ifndef D4054 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0); #endif callout_init(&sc->sc_calib_to, 0); callout_init(&sc->sc_pwrmode_init, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); RTWN_LOCK(sc); error = rtwn_read_chipid(sc); RTWN_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "unsupported test chip\n"); goto detach; } error = rtwn_read_rom(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot read rom, error %d\n", __func__, error); goto detach; } if (sc->macid_limit > RTWN_MACID_LIMIT) { device_printf(sc->sc_dev, "macid limit will be reduced from %d to %d\n", sc->macid_limit, RTWN_MACID_LIMIT); sc->macid_limit = RTWN_MACID_LIMIT; } if (sc->cam_entry_limit > RTWN_CAM_ENTRY_LIMIT) { device_printf(sc->sc_dev, "cam entry limit will be reduced from %d to %d\n", sc->cam_entry_limit, RTWN_CAM_ENTRY_LIMIT); sc->cam_entry_limit = RTWN_CAM_ENTRY_LIMIT; } if (sc->txdesc_len > RTWN_TX_DESC_SIZE) { device_printf(sc->sc_dev, "adjust size for Tx descriptor (current %d, needed %d)\n", RTWN_TX_DESC_SIZE, sc->txdesc_len); goto detach; } device_printf(sc->sc_dev, "MAC/BB %s, RF 6052 %dT%dR\n", sc->name, sc->ntxchains, sc->nrxchains); ic->ic_softc = sc; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_IBSS /* adhoc mode */ | IEEE80211_C_HOSTAP /* hostap mode */ #if 0 /* TODO: HRPWM register setup */ #ifndef RTWN_WITHOUT_UCODE | IEEE80211_C_PMGT /* Station-side power mgmt */ #endif #endif | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_SWAMSDUTX /* Do software A-MSDU TX */ | IEEE80211_C_FF /* Atheros fast-frames */ ; if (sc->sc_hwcrypto != RTWN_CRYPTO_SW) { ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_AES_CCM; } ic->ic_htcaps = IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ | IEEE80211_HTCAP_SMPS_OFF /* SM PS mode disabled */ /* s/w capabilities */ | IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* A-MPDU tx */ | IEEE80211_HTC_AMSDU /* A-MSDU tx */ ; if (sc->sc_ht40) { ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 /* 40 MHz channel width */ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ ; } 
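/* One spatial stream is advertised per Tx/Rx chain. */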
ic->ic_txstream = sc->ntxchains; ic->ic_rxstream = sc->nrxchains; /* Enable TX watchdog */ #ifdef D4054 ic->ic_flags_ext |= IEEE80211_FEXT_WATCHDOG; #endif /* Adjust capabilities. */ rtwn_adj_devcaps(sc); rtwn_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); /* XXX TODO: setup regdomain if R92C_CHANNEL_PLAN_BY_HW bit is set. */ ieee80211_ifattach(ic); ic->ic_raw_xmit = rtwn_raw_xmit; ic->ic_scan_start = rtwn_scan_start; sc->sc_scan_curchan = ic->ic_scan_curchan; ic->ic_scan_curchan = rtwn_scan_curchan; ic->ic_scan_end = rtwn_scan_end; ic->ic_getradiocaps = rtwn_getradiocaps; ic->ic_update_chw = rtwn_update_chw; ic->ic_set_channel = rtwn_set_channel; ic->ic_transmit = rtwn_transmit; ic->ic_parent = rtwn_parent; ic->ic_vap_create = rtwn_vap_create; ic->ic_vap_delete = rtwn_vap_delete; ic->ic_wme.wme_update = rtwn_wme_update; ic->ic_updateslot = rtwn_update_slot; ic->ic_update_promisc = rtwn_update_promisc; ic->ic_update_mcast = rtwn_update_mcast; ic->ic_node_alloc = rtwn_node_alloc; ic->ic_newassoc = rtwn_newassoc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = rtwn_node_free; rtwn_postattach(sc); rtwn_radiotap_attach(sc); if (bootverbose) ieee80211_announce(ic); return (0); detach: return (ENXIO); /* failure */ } static void rtwn_radiotap_attach(struct rtwn_softc *sc) { struct rtwn_rx_radiotap_header *rxtap = &sc->sc_rxtap; struct rtwn_tx_radiotap_header *txtap = &sc->sc_txtap; ieee80211_radiotap_attach(&sc->sc_ic, &txtap->wt_ihdr, sizeof(*txtap), RTWN_TX_RADIOTAP_PRESENT, &rxtap->wr_ihdr, sizeof(*rxtap), RTWN_RX_RADIOTAP_PRESENT); } void rtwn_sysctlattach(struct rtwn_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); #if 1 sc->sc_ht40 = 0; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ht40", CTLFLAG_RDTUN, &sc->sc_ht40, sc->sc_ht40, "Enable 40 MHz mode support"); #endif #ifdef RTWN_DEBUG SYSCTL_ADD_U32(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, sc->sc_debug, "Control debugging printfs"); #endif sc->sc_hwcrypto = RTWN_CRYPTO_PAIR; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "hwcrypto", CTLFLAG_RDTUN, &sc->sc_hwcrypto, sc->sc_hwcrypto, "Enable h/w crypto: " "0 - disable, 1 - pairwise keys, 2 - all keys"); if (sc->sc_hwcrypto >= RTWN_CRYPTO_MAX) sc->sc_hwcrypto = RTWN_CRYPTO_FULL; sc->sc_ratectl_sysctl = RTWN_RATECTL_NET80211; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ratectl", CTLFLAG_RDTUN, &sc->sc_ratectl_sysctl, sc->sc_ratectl_sysctl, "Select rate control mechanism: " "0 - disabled, 1 - via net80211, 2 - via firmware"); if (sc->sc_ratectl_sysctl >= RTWN_RATECTL_MAX) sc->sc_ratectl_sysctl = RTWN_RATECTL_FW; sc->sc_ratectl = sc->sc_ratectl_sysctl; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ratectl_selected", CTLFLAG_RD, &sc->sc_ratectl, sc->sc_ratectl, "Currently selected rate control mechanism (by the driver)"); } void rtwn_detach(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_softc == sc) { /* Stop command queue. 
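* Setting sc_detached under the command-queue lock makes the sleepable command task bail out; the task is then drained before detaching from net80211.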
*/ RTWN_CMDQ_LOCK(sc); sc->sc_detached = 1; RTWN_CMDQ_UNLOCK(sc); ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_ifdetach(ic); } rtwn_cmdq_destroy(sc); if (RTWN_NT_LOCK_INITIALIZED(sc)) RTWN_NT_LOCK_DESTROY(sc); } void rtwn_suspend(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; ieee80211_suspend_all(ic); } void rtwn_resume(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; ieee80211_resume_all(ic); } static void rtwn_vap_decrement_counters(struct rtwn_softc *sc, enum ieee80211_opmode opmode, int id) { RTWN_ASSERT_LOCKED(sc); if (id != RTWN_VAP_ID_INVALID) { KASSERT(id == 0 || id == 1, ("wrong vap id %d!\n", id)); KASSERT(sc->vaps[id] != NULL, ("vap pointer is NULL\n")); sc->vaps[id] = NULL; } switch (opmode) { case IEEE80211_M_HOSTAP: sc->ap_vaps--; /* FALLTHROUGH */ case IEEE80211_M_IBSS: sc->bcn_vaps--; /* FALLTHROUGH */ case IEEE80211_M_STA: sc->nvaps--; break; case IEEE80211_M_MONITOR: sc->mon_vaps--; break; default: KASSERT(0, ("wrong opmode %d\n", opmode)); break; } KASSERT(sc->vaps_running >= 0 && sc->monvaps_running >= 0, ("number of running vaps is negative (vaps %d, monvaps %d)\n", sc->vaps_running, sc->monvaps_running)); KASSERT(sc->vaps_running - sc->monvaps_running <= RTWN_PORT_COUNT, ("number of running vaps is too big (vaps %d, monvaps %d)\n", sc->vaps_running, sc->monvaps_running)); KASSERT(sc->nvaps >= 0 && sc->nvaps <= RTWN_PORT_COUNT, ("wrong value %d for nvaps\n", sc->nvaps)); KASSERT(sc->mon_vaps >= 0, ("mon_vaps is negative (%d)\n", sc->mon_vaps)); KASSERT(sc->bcn_vaps >= 0 && ((RTWN_CHIP_HAS_BCNQ1(sc) && sc->bcn_vaps <= RTWN_PORT_COUNT) || sc->bcn_vaps <= 1), ("bcn_vaps value %d is wrong\n", sc->bcn_vaps)); KASSERT(sc->ap_vaps >= 0 && ((RTWN_CHIP_HAS_BCNQ1(sc) && sc->ap_vaps <= RTWN_PORT_COUNT) || sc->ap_vaps <= 1), ("ap_vaps value %d is wrong\n", sc->ap_vaps)); } static void rtwn_set_ic_opmode(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; RTWN_ASSERT_LOCKED(sc); /* for ieee80211_reset_erp() */ if (sc->bcn_vaps - sc->ap_vaps > 0) ic->ic_opmode = IEEE80211_M_IBSS; else if (sc->ap_vaps > 0) ic->ic_opmode = IEEE80211_M_HOSTAP; else if (sc->nvaps > 0) ic->ic_opmode = IEEE80211_M_STA; else ic->ic_opmode = IEEE80211_M_MONITOR; } static struct ieee80211vap * rtwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rtwn_softc *sc = ic->ic_softc; struct rtwn_vap *uvp; struct ieee80211vap *vap; int id = RTWN_VAP_ID_INVALID; RTWN_LOCK(sc); KASSERT(sc->nvaps <= RTWN_PORT_COUNT, ("nvaps overflow (%d > %d)\n", sc->nvaps, RTWN_PORT_COUNT)); KASSERT(sc->ap_vaps <= RTWN_PORT_COUNT, ("ap_vaps overflow (%d > %d)\n", sc->ap_vaps, RTWN_PORT_COUNT)); KASSERT(sc->bcn_vaps <= RTWN_PORT_COUNT, ("bcn_vaps overflow (%d > %d)\n", sc->bcn_vaps, RTWN_PORT_COUNT)); if (opmode != IEEE80211_M_MONITOR) { switch (sc->nvaps) { case 0: id = 0; break; case 1: if (sc->vaps[1] == NULL) id = 1; else if (sc->vaps[0] == NULL) id = 0; KASSERT(id != RTWN_VAP_ID_INVALID, ("no free ports left\n")); break; case 2: default: goto fail; } if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) { if ((sc->bcn_vaps == 1 && !RTWN_CHIP_HAS_BCNQ1(sc)) || sc->bcn_vaps == RTWN_PORT_COUNT) goto fail; } } switch (opmode) { case IEEE80211_M_HOSTAP: sc->ap_vaps++; /* FALLTHROUGH */ case IEEE80211_M_IBSS: sc->bcn_vaps++; /* FALLTHROUGH */ case IEEE80211_M_STA: sc->nvaps++; break; case IEEE80211_M_MONITOR: sc->mon_vaps++; 
break; default: KASSERT(0, ("unknown opmode %d\n", opmode)); goto fail; } RTWN_UNLOCK(sc); uvp = malloc(sizeof(struct rtwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); uvp->id = id; if (id != RTWN_VAP_ID_INVALID) { RTWN_LOCK(sc); sc->vaps[id] = uvp; RTWN_UNLOCK(sc); } vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); RTWN_LOCK(sc); rtwn_vap_decrement_counters(sc, opmode, id); RTWN_UNLOCK(sc); return (NULL); } rtwn_beacon_init(sc, &uvp->bcn_desc.txd[0], uvp->id); rtwn_vap_preattach(sc, vap); /* override state transition machine */ uvp->newstate = vap->iv_newstate; if (opmode == IEEE80211_M_MONITOR) vap->iv_newstate = rtwn_monitor_newstate; else vap->iv_newstate = rtwn_newstate; vap->iv_update_beacon = rtwn_update_beacon; vap->iv_reset = rtwn_ioctl_reset; vap->iv_key_alloc = rtwn_key_alloc; vap->iv_key_set = rtwn_key_set; vap->iv_key_delete = rtwn_key_delete; vap->iv_max_aid = sc->macid_limit; /* 802.11n parameters */ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_16; vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; TIMEOUT_TASK_INIT(taskqueue_thread, &uvp->tx_beacon_csa, 0, rtwn_tx_beacon_csa, vap); if (opmode == IEEE80211_M_IBSS) { uvp->recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = rtwn_adhoc_recv_mgmt; TASK_INIT(&uvp->tsf_sync_adhoc_task, 0, rtwn_tsf_sync_adhoc_task, vap); callout_init(&uvp->tsf_sync_adhoc, 0); } /* * NB: driver can select net80211 RA even when user requests * another mechanism. */ ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); RTWN_LOCK(sc); rtwn_set_ic_opmode(sc); if (sc->sc_flags & RTWN_RUNNING) { if (uvp->id != RTWN_VAP_ID_INVALID) rtwn_set_macaddr(sc, vap->iv_myaddr, uvp->id); rtwn_rxfilter_update(sc); } RTWN_UNLOCK(sc); return (vap); fail: RTWN_UNLOCK(sc); return (NULL); } static void rtwn_vap_delete(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rtwn_softc *sc = ic->ic_softc; struct rtwn_vap *uvp = RTWN_VAP(vap); /* Put vap into INIT state + stop device if needed. */ ieee80211_stop(vap); ieee80211_draintask(ic, &vap->iv_nstate_task); ieee80211_draintask(ic, &ic->ic_parent_task); RTWN_LOCK(sc); /* Cancel any unfinished Tx. 
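* Any staged beacon is freed as well, and the vap's hardware port is released before the final net80211 detach.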
*/ rtwn_reset_lists(sc, vap); if (uvp->bcn_mbuf != NULL) m_freem(uvp->bcn_mbuf); rtwn_vap_decrement_counters(sc, vap->iv_opmode, uvp->id); rtwn_set_ic_opmode(sc); if (sc->sc_flags & RTWN_RUNNING) rtwn_rxfilter_update(sc); RTWN_UNLOCK(sc); if (vap->iv_opmode == IEEE80211_M_IBSS) { ieee80211_draintask(ic, &uvp->tsf_sync_adhoc_task); callout_drain(&uvp->tsf_sync_adhoc); } ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static int rtwn_read_chipid(struct rtwn_softc *sc) { uint32_t reg; reg = rtwn_read_4(sc, R92C_SYS_CFG); if (reg & R92C_SYS_CFG_TRP_VAUX_EN) /* test chip */ return (EOPNOTSUPP); rtwn_read_chipid_vendor(sc, reg); return (0); } static int rtwn_ioctl_reset(struct ieee80211vap *vap, u_long cmd) { int error; switch (cmd) { #ifndef RTWN_WITHOUT_UCODE case IEEE80211_IOC_POWERSAVE: case IEEE80211_IOC_POWERSAVESLEEP: { struct rtwn_softc *sc = vap->iv_ic->ic_softc; struct rtwn_vap *uvp = RTWN_VAP(vap); if (vap->iv_opmode == IEEE80211_M_STA && uvp->id == 0) { RTWN_LOCK(sc); if (sc->sc_flags & RTWN_RUNNING) error = rtwn_set_pwrmode(sc, vap, 1); else error = 0; RTWN_UNLOCK(sc); if (error != 0) error = ENETRESET; } else error = EOPNOTSUPP; break; } #endif case IEEE80211_IOC_SHORTGI: case IEEE80211_IOC_RTSTHRESHOLD: case IEEE80211_IOC_PROTMODE: case IEEE80211_IOC_HTPROTMODE: case IEEE80211_IOC_LDPC: error = 0; break; default: error = ENETRESET; break; } return (error); } #ifndef RTWN_WITHOUT_UCODE static void rtwn_set_media_status(struct rtwn_softc *sc, union sec_param *data) { sc->sc_set_media_status(sc, data->macid); } static int rtwn_tx_fwpkt_check(struct rtwn_softc *sc, struct ieee80211vap *vap) { int ntries, error; for (ntries = 0; ntries < 5; ntries++) { error = rtwn_push_nulldata(sc, vap); if (error == 0) break; } if (ntries == 5) { device_printf(sc->sc_dev, "%s: cannot push f/w frames into chip, error %d!\n", __func__, error); return (error); } return (0); } static int rtwn_construct_nulldata(struct rtwn_softc *sc, struct ieee80211vap *vap, uint8_t *ptr, int qos) { struct rtwn_vap *uvp = RTWN_VAP(vap); struct ieee80211com *ic = &sc->sc_ic; struct rtwn_tx_desc_common *txd; struct ieee80211_frame *wh; int pktlen; /* XXX obtain from net80211 */ wh = (struct ieee80211_frame *)(ptr + sc->txdesc_len); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA; wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_bss->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bss->ni_macaddr); txd = (struct rtwn_tx_desc_common *)ptr; txd->offset = sc->txdesc_len; pktlen = sc->txdesc_len; if (qos) { struct ieee80211_qosframe *qwh; const int tid = WME_AC_TO_TID(WME_AC_BE); qwh = (struct ieee80211_qosframe *)wh; qwh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS_NULL; qwh->i_qos[0] = tid & IEEE80211_QOS_TID; txd->pktlen = htole16(sizeof(struct ieee80211_qosframe)); pktlen += sizeof(struct ieee80211_qosframe); } else { wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_NODATA; txd->pktlen = htole16(sizeof(struct ieee80211_frame)); pktlen += sizeof(struct ieee80211_frame); } rtwn_fill_tx_desc_null(sc, ptr, ic->ic_curmode == IEEE80211_MODE_11B, qos, uvp->id); return (pktlen); } static int rtwn_push_nulldata(struct rtwn_softc *sc, struct ieee80211vap *vap) { struct rtwn_vap *uvp = RTWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ieee80211_channel *c = ic->ic_curchan; struct mbuf *m; uint8_t *ptr; int required_size, bcn_size, null_size, null_data, error; if (!(sc->sc_flags & RTWN_FW_LOADED)) return (0); 
/* requires firmware */ KASSERT(sc->page_size > 0, ("page size was not set!\n")); /* Leave some space for beacon (multi-vap) */ bcn_size = roundup(RTWN_BCN_MAX_SIZE, sc->page_size); /* 1 page for Null Data + 1 page for Qos Null Data frames. */ required_size = bcn_size + sc->page_size * 2; m = m_get2(required_size, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOMEM); /* Setup beacon descriptor. */ rtwn_beacon_set_rate(sc, &uvp->bcn_desc.txd[0], IEEE80211_IS_CHAN_5GHZ(c)); ptr = mtod(m, uint8_t *); memset(ptr, 0, required_size - sc->txdesc_len); /* Construct Null Data frame. */ ptr += bcn_size - sc->txdesc_len; null_size = rtwn_construct_nulldata(sc, vap, ptr, 0); KASSERT(null_size < sc->page_size, ("recalculate size for Null Data frame\n")); /* Construct Qos Null Data frame. */ ptr += roundup(null_size, sc->page_size); null_size = rtwn_construct_nulldata(sc, vap, ptr, 1); KASSERT(null_size < sc->page_size, ("recalculate size for Qos Null Data frame\n")); /* Do not try to detect a beacon here. */ rtwn_setbits_1_shift(sc, R92C_CR, 0, R92C_CR_ENSWBCN, 1); rtwn_setbits_1_shift(sc, R92C_FWHW_TXQ_CTRL, R92C_FWHW_TXQ_CTRL_REAL_BEACON, 0, 2); if (uvp->bcn_mbuf != NULL) { rtwn_beacon_unload(sc, uvp->id); m_freem(uvp->bcn_mbuf); } m->m_pkthdr.len = m->m_len = required_size - sc->txdesc_len; uvp->bcn_mbuf = m; error = rtwn_tx_beacon_check(sc, uvp); if (error != 0) { RTWN_DPRINTF(sc, RTWN_DEBUG_BEACON, "%s: frame was not recognized!\n", __func__); goto fail; } /* Setup addresses in firmware. */ null_data = howmany(bcn_size, sc->page_size); error = rtwn_set_rsvd_page(sc, 0, null_data, null_data + 1); if (error != 0) { device_printf(sc->sc_dev, "%s: CMD_RSVD_PAGE was not sent, error %d\n", __func__, error); goto fail; } fail: /* Re-enable beacon detection. */ rtwn_setbits_1_shift(sc, R92C_FWHW_TXQ_CTRL, 0, R92C_FWHW_TXQ_CTRL_REAL_BEACON, 2); rtwn_setbits_1_shift(sc, R92C_CR, R92C_CR_ENSWBCN, 0, 1); /* Restore beacon (if present). */ if (sc->bcn_vaps > 0 && sc->vaps[!uvp->id] != NULL) { struct rtwn_vap *uvp2 = sc->vaps[!uvp->id]; if (uvp2->curr_mode != R92C_MSR_NOLINK) error = rtwn_tx_beacon_check(sc, uvp2); } return (error); } static void rtwn_pwrmode_init(void *arg) { struct rtwn_softc *sc = arg; rtwn_cmd_sleepable(sc, NULL, 0, rtwn_set_pwrmode_cb); } static void rtwn_set_pwrmode_cb(struct rtwn_softc *sc, union sec_param *data) { struct ieee80211vap *vap = &sc->vaps[0]->vap; if (vap != NULL) rtwn_set_pwrmode(sc, vap, 1); } #endif static void rtwn_tsf_sync_adhoc(void *arg) { struct ieee80211vap *vap = arg; struct ieee80211com *ic = vap->iv_ic; struct rtwn_vap *uvp = RTWN_VAP(vap); if (uvp->curr_mode != R92C_MSR_NOLINK) { /* Do it in process context. */ ieee80211_runtask(ic, &uvp->tsf_sync_adhoc_task); } } /* * Workaround for TSF synchronization: * when BSSID filter in IBSS mode is not set * (and TSF synchronization is enabled), then any beacon may update it. * This routine synchronizes it when BSSID matching is enabled (IBSS merge * is not possible during this period). * * NOTE: there is no race with rtwn_newstate(), since it uses the same * taskqueue. */ static void rtwn_tsf_sync_adhoc_task(void *arg, int pending) { struct ieee80211vap *vap = arg; struct rtwn_vap *uvp = RTWN_VAP(vap); struct rtwn_softc *sc = vap->iv_ic->ic_softc; struct ieee80211_node *ni; RTWN_LOCK(sc); ni = ieee80211_ref_node(vap->iv_bss); /* Accept beacons with the same BSSID. */ rtwn_set_rx_bssid_all(sc, 0); /* Deny RCR updates. */ sc->sc_flags |= RTWN_RCR_LOCKED; /* Enable synchronization. 
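* Clearing R92C_BCN_CTRL_DIS_TSF_UDT0 opens a window during which received beacons may update the TSF; the window lasts roughly five beacon intervals (see the delay below) before the bit is set again.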
*/ rtwn_setbits_1(sc, R92C_BCN_CTRL(uvp->id), R92C_BCN_CTRL_DIS_TSF_UDT0, 0); /* Synchronize. */ rtwn_delay(sc, ni->ni_intval * 5 * 1000); /* Disable synchronization. */ rtwn_setbits_1(sc, R92C_BCN_CTRL(uvp->id), 0, R92C_BCN_CTRL_DIS_TSF_UDT0); /* Accept all beacons. */ sc->sc_flags &= ~RTWN_RCR_LOCKED; rtwn_set_rx_bssid_all(sc, 1); /* Schedule next TSF synchronization. */ callout_reset(&uvp->tsf_sync_adhoc, 60*hz, rtwn_tsf_sync_adhoc, vap); ieee80211_free_node(ni); RTWN_UNLOCK(sc); } static void rtwn_tsf_sync_enable(struct rtwn_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = &sc->sc_ic; struct rtwn_vap *uvp = RTWN_VAP(vap); /* Reset TSF. */ rtwn_write_1(sc, R92C_DUAL_TSF_RST, R92C_DUAL_TSF_RESET(uvp->id)); switch (vap->iv_opmode) { case IEEE80211_M_STA: /* Enable TSF synchronization. */ rtwn_setbits_1(sc, R92C_BCN_CTRL(uvp->id), R92C_BCN_CTRL_DIS_TSF_UDT0, 0); break; case IEEE80211_M_IBSS: ieee80211_runtask(ic, &uvp->tsf_sync_adhoc_task); /* FALLTHROUGH */ case IEEE80211_M_HOSTAP: /* Enable beaconing. */ rtwn_beacon_enable(sc, uvp->id, 1); break; default: device_printf(sc->sc_dev, "undefined opmode %d\n", vap->iv_opmode); return; } } static void rtwn_set_ack_preamble(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t reg; reg = rtwn_read_4(sc, R92C_WMAC_TRXPTCL_CTL); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) reg |= R92C_WMAC_TRXPTCL_SHPRE; else reg &= ~R92C_WMAC_TRXPTCL_SHPRE; rtwn_write_4(sc, R92C_WMAC_TRXPTCL_CTL, reg); } static void rtwn_set_mode(struct rtwn_softc *sc, uint8_t mode, int id) { rtwn_setbits_1(sc, R92C_MSR, R92C_MSR_MASK << id * 2, mode << id * 2); if (sc->vaps[id] != NULL) sc->vaps[id]->curr_mode = mode; } static int rtwn_monitor_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct rtwn_softc *sc = ic->ic_softc; struct rtwn_vap *uvp = RTWN_VAP(vap); RTWN_DPRINTF(sc, RTWN_DEBUG_STATE, "%s -> %s\n", ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); if (vap->iv_state != nstate) { IEEE80211_UNLOCK(ic); RTWN_LOCK(sc); switch (nstate) { case IEEE80211_S_INIT: sc->vaps_running--; sc->monvaps_running--; if (sc->vaps_running == 0) { /* Turn link LED off. */ rtwn_set_led(sc, RTWN_LED_LINK, 0); } break; case IEEE80211_S_RUN: sc->vaps_running++; sc->monvaps_running++; if (sc->vaps_running == 1) { /* Turn link LED on. */ rtwn_set_led(sc, RTWN_LED_LINK, 1); } break; default: /* NOTREACHED */ break; } RTWN_UNLOCK(sc); IEEE80211_LOCK(ic); } return (uvp->newstate(vap, nstate, arg)); } static int rtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rtwn_vap *uvp = RTWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rtwn_softc *sc = ic->ic_softc; enum ieee80211_state ostate; int error, early_newstate; ostate = vap->iv_state; RTWN_DPRINTF(sc, RTWN_DEBUG_STATE, "%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); if (vap->iv_bss->ni_chan == IEEE80211_CHAN_ANYC && ostate == IEEE80211_S_INIT && nstate == IEEE80211_S_RUN) { /* need to call iv_newstate() firstly */ error = uvp->newstate(vap, nstate, arg); if (error != 0) return (error); early_newstate = 1; } else early_newstate = 0; if (ostate == IEEE80211_S_CSA) { taskqueue_cancel_timeout(taskqueue_thread, &uvp->tx_beacon_csa, NULL); /* * In multi-vap case second counter may not be cleared * properly. */ vap->iv_csa_count = 0; } IEEE80211_UNLOCK(ic); RTWN_LOCK(sc); if (ostate == IEEE80211_S_CSA) { /* Unblock all queues (multi-vap case). 
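* Writing 0 to R92C_TXPAUSE clears every queue-pause bit that was set while the vap was in the CSA state.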
*/ rtwn_write_1(sc, R92C_TXPAUSE, 0); } if ((ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_CSA) || ostate == IEEE80211_S_CSA) { sc->vaps_running--; /* Set media status to 'No Link'. */ rtwn_set_mode(sc, R92C_MSR_NOLINK, uvp->id); if (vap->iv_opmode == IEEE80211_M_IBSS) { /* Stop periodical TSF synchronization. */ callout_stop(&uvp->tsf_sync_adhoc); } /* Disable TSF synchronization / beaconing. */ rtwn_beacon_enable(sc, uvp->id, 0); rtwn_setbits_1(sc, R92C_BCN_CTRL(uvp->id), 0, R92C_BCN_CTRL_DIS_TSF_UDT0); /* NB: monitor mode vaps are using port 0. */ if (uvp->id != 0 || sc->monvaps_running == 0) { /* Reset TSF. */ rtwn_write_1(sc, R92C_DUAL_TSF_RST, R92C_DUAL_TSF_RESET(uvp->id)); } #ifndef RTWN_WITHOUT_UCODE if ((ic->ic_caps & IEEE80211_C_PMGT) != 0 && uvp->id == 0) { /* Disable power management. */ callout_stop(&sc->sc_pwrmode_init); rtwn_set_pwrmode(sc, vap, 0); } #endif if (sc->vaps_running - sc->monvaps_running > 0) { /* Recalculate basic rates bitmap. */ rtwn_calc_basicrates(sc); } if (sc->vaps_running == sc->monvaps_running) { /* Stop calibration. */ callout_stop(&sc->sc_calib_to); /* Stop Rx of data frames. */ rtwn_write_2(sc, R92C_RXFLTMAP2, 0); /* Reset EDCA parameters. */ rtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002f3217); rtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005e4317); rtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x00105320); rtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a444); if (sc->vaps_running == 0) { /* Turn link LED off. */ rtwn_set_led(sc, RTWN_LED_LINK, 0); } } } error = 0; switch (nstate) { case IEEE80211_S_SCAN: /* Pause AC Tx queues. */ if (sc->vaps_running == 0) rtwn_setbits_1(sc, R92C_TXPAUSE, 0, R92C_TX_QUEUE_AC); break; case IEEE80211_S_RUN: error = rtwn_run(sc, vap); if (error != 0) { device_printf(sc->sc_dev, "%s: could not move to RUN state\n", __func__); break; } sc->vaps_running++; break; case IEEE80211_S_CSA: /* Block all Tx queues (except beacon queue). */ rtwn_setbits_1(sc, R92C_TXPAUSE, 0, R92C_TX_QUEUE_AC | R92C_TX_QUEUE_MGT | R92C_TX_QUEUE_HIGH); break; default: break; } RTWN_UNLOCK(sc); IEEE80211_LOCK(ic); if (error != 0) return (error); return (early_newstate ? 0 : uvp->newstate(vap, nstate, arg)); } static void rtwn_calc_basicrates(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t basicrates; int i; RTWN_ASSERT_LOCKED(sc); if (ic->ic_flags & IEEE80211_F_SCAN) return; /* will be done by rtwn_scan_end(). */ basicrates = 0; for (i = 0; i < nitems(sc->vaps); i++) { struct rtwn_vap *rvp; struct ieee80211vap *vap; struct ieee80211_node *ni; uint32_t rates; rvp = sc->vaps[i]; if (rvp == NULL || rvp->curr_mode == R92C_MSR_NOLINK) continue; vap = &rvp->vap; if (vap->iv_bss == NULL) continue; ni = ieee80211_ref_node(vap->iv_bss); rtwn_get_rates(sc, &ni->ni_rates, NULL, &rates, NULL, 1); basicrates |= rates; ieee80211_free_node(ni); } if (basicrates == 0) return; /* XXX initial RTS rate? 
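* (basicrates at this point is the OR of the basic rate set of every running vap, as collected by the loop above)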
*/ rtwn_set_basicrates(sc, basicrates); } static int rtwn_run(struct rtwn_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rtwn_vap *uvp = RTWN_VAP(vap); struct ieee80211_node *ni; uint8_t mode; int error; RTWN_ASSERT_LOCKED(sc); error = 0; ni = ieee80211_ref_node(vap->iv_bss); if (ic->ic_bsschan == IEEE80211_CHAN_ANYC || ni->ni_chan == IEEE80211_CHAN_ANYC) { error = EINVAL; goto fail; } switch (vap->iv_opmode) { case IEEE80211_M_STA: mode = R92C_MSR_INFRA; break; case IEEE80211_M_IBSS: mode = R92C_MSR_ADHOC; break; case IEEE80211_M_HOSTAP: mode = R92C_MSR_AP; break; default: KASSERT(0, ("undefined opmode %d\n", vap->iv_opmode)); error = EINVAL; goto fail; } /* Set media status to 'Associated'. */ rtwn_set_mode(sc, mode, uvp->id); /* Set AssocID. */ /* XXX multi-vap? */ rtwn_write_2(sc, R92C_BCN_PSR_RPT, 0xc000 | IEEE80211_NODE_AID(ni)); /* Set BSSID. */ rtwn_set_bssid(sc, ni->ni_bssid, uvp->id); /* Set beacon interval. */ rtwn_write_2(sc, R92C_BCN_INTERVAL(uvp->id), ni->ni_intval); if (sc->vaps_running == sc->monvaps_running) { /* Enable Rx of data frames. */ rtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff); /* Flush all AC queues. */ rtwn_write_1(sc, R92C_TXPAUSE, 0); } #ifndef RTWN_WITHOUT_UCODE /* Upload (QoS) Null Data frame to firmware. */ /* Note: do this for port 0 only. */ if ((ic->ic_caps & IEEE80211_C_PMGT) != 0 && vap->iv_opmode == IEEE80211_M_STA && uvp->id == 0) { error = rtwn_tx_fwpkt_check(sc, vap); if (error != 0) goto fail; /* Setup power management. */ /* * NB: it will be enabled immediately - delay it, * so 4-Way handshake will not be interrupted. */ callout_reset(&sc->sc_pwrmode_init, 5*hz, rtwn_pwrmode_init, sc); } #endif /* Enable TSF synchronization. */ rtwn_tsf_sync_enable(sc, vap); if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { error = rtwn_setup_beacon(sc, ni); if (error != 0) { device_printf(sc->sc_dev, "unable to push beacon into the chip, " "error %d\n", error); goto fail; } } /* Set ACK preamble type. */ rtwn_set_ack_preamble(sc); /* Set basic rates mask. */ rtwn_calc_basicrates(sc); #ifdef RTWN_TODO rtwn_write_1(sc, R92C_SIFS_CCK + 1, 10); rtwn_write_1(sc, R92C_SIFS_OFDM + 1, 10); rtwn_write_1(sc, R92C_SPEC_SIFS + 1, 10); rtwn_write_1(sc, R92C_MAC_SPEC_SIFS + 1, 10); rtwn_write_1(sc, R92C_R2T_SIFS + 1, 10); rtwn_write_1(sc, R92C_T2T_SIFS + 1, 10); #endif if (sc->vaps_running == sc->monvaps_running) { /* Reset temperature calibration state machine. */ sc->sc_flags &= ~RTWN_TEMP_MEASURED; sc->thcal_temp = sc->thermal_meter; /* Start periodic calibration. */ callout_reset(&sc->sc_calib_to, 2*hz, rtwn_calib_to, sc); if (sc->vaps_running == 0) { /* Turn link LED on. 
*/ rtwn_set_led(sc, RTWN_LED_LINK, 1); } } fail: ieee80211_free_node(ni); return (error); } #ifndef D4054 static void rtwn_watchdog(void *arg) { struct rtwn_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; RTWN_ASSERT_LOCKED(sc); KASSERT(sc->sc_flags & RTWN_RUNNING, ("not running")); if (sc->sc_tx_timer != 0 && --sc->sc_tx_timer == 0) { ic_printf(ic, "device timeout\n"); ieee80211_restart_all(ic); return; } callout_reset(&sc->sc_watchdog_to, hz, rtwn_watchdog, sc); } #endif static void rtwn_parent(struct ieee80211com *ic) { struct rtwn_softc *sc = ic->ic_softc; struct ieee80211vap *vap; if (ic->ic_nrunning > 0) { if (rtwn_init(sc) != 0) { IEEE80211_LOCK(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) ieee80211_stop_locked(vap); IEEE80211_UNLOCK(ic); } else ieee80211_start_all(ic); } else rtwn_stop(sc); -} - - -static int -rtwn_llt_write(struct rtwn_softc *sc, uint32_t addr, uint32_t data) -{ - int ntries, error; - - error = rtwn_write_4(sc, R92C_LLT_INIT, - SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) | - SM(R92C_LLT_INIT_ADDR, addr) | - SM(R92C_LLT_INIT_DATA, data)); - if (error != 0) - return (error); - /* Wait for write operation to complete. */ - for (ntries = 0; ntries < 20; ntries++) { - if (MS(rtwn_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == - R92C_LLT_INIT_OP_NO_ACTIVE) - return (0); - rtwn_delay(sc, 10); - } - return (ETIMEDOUT); -} - -static int -rtwn_llt_init(struct rtwn_softc *sc) -{ - int i, error; - - /* Reserve pages [0; page_count]. */ - for (i = 0; i < sc->page_count; i++) { - if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) - return (error); - } - /* NB: 0xff indicates end-of-list. */ - if ((error = rtwn_llt_write(sc, i, 0xff)) != 0) - return (error); - /* - * Use pages [page_count + 1; pktbuf_count - 1] - * as ring buffer. - */ - for (++i; i < sc->pktbuf_count - 1; i++) { - if ((error = rtwn_llt_write(sc, i, i + 1)) != 0) - return (error); - } - /* Make the last page point to the beginning of the ring buffer. */ - error = rtwn_llt_write(sc, i, sc->page_count + 1); - return (error); } static int rtwn_dma_init(struct rtwn_softc *sc) { #define RTWN_CHK(res) do { \ if (res != 0) \ return (EIO); \ } while(0) uint16_t reg; uint8_t tx_boundary; int error; /* Initialize LLT table. */ error = rtwn_llt_init(sc); if (error != 0) return (error); /* Set the number of pages for each queue. */ RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "%s: pages per queue: high %d, normal %d, low %d, public %d\n", __func__, sc->nhqpages, sc->nnqpages, sc->nlqpages, sc->npubqpages); RTWN_CHK(rtwn_write_1(sc, R92C_RQPN_NPQ, sc->nnqpages)); RTWN_CHK(rtwn_write_4(sc, R92C_RQPN, /* Set number of pages for public queue. */ SM(R92C_RQPN_PUBQ, sc->npubqpages) | /* Set number of pages for high priority queue. */ SM(R92C_RQPN_HPQ, sc->nhqpages) | /* Set number of pages for low priority queue. */ SM(R92C_RQPN_LPQ, sc->nlqpages) | /* Load values. */ R92C_RQPN_LD)); /* Initialize TX buffer boundary. */ KASSERT(sc->page_count < 255 && sc->page_count > 0, ("page_count is %d\n", sc->page_count)); tx_boundary = sc->page_count + 1; RTWN_CHK(rtwn_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, tx_boundary)); RTWN_CHK(rtwn_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, tx_boundary)); RTWN_CHK(rtwn_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, tx_boundary)); RTWN_CHK(rtwn_write_1(sc, R92C_TRXFF_BNDY, tx_boundary)); RTWN_CHK(rtwn_write_1(sc, R92C_TDECTRL + 1, tx_boundary)); error = rtwn_init_bcnq1_boundary(sc); if (error != 0) return (error); /* Set queue to USB pipe mapping. */ /* Note: PCIe devices are using some magic number here. 
*/ reg = rtwn_get_qmap(sc); RTWN_CHK(rtwn_setbits_2(sc, R92C_TRXDMA_CTRL, R92C_TRXDMA_CTRL_QMAP_M, reg)); /* Configure Tx/Rx DMA (PCIe). */ rtwn_set_desc_addr(sc); /* Set Tx/Rx transfer page boundary. */ RTWN_CHK(rtwn_write_2(sc, R92C_TRXFF_BNDY + 2, sc->rx_dma_size - 1)); /* Set Tx/Rx transfer page size. */ rtwn_set_page_size(sc); return (0); } static int rtwn_mac_init(struct rtwn_softc *sc) { int i, error; /* Write MAC initialization values. */ for (i = 0; i < sc->mac_size; i++) { error = rtwn_write_1(sc, sc->mac_prog[i].reg, sc->mac_prog[i].val); if (error != 0) return (error); } return (0); } static void rtwn_mrr_init(struct rtwn_softc *sc) { int i; /* Drop rate index by 1 per retry. */ for (i = 0; i < R92C_DARFRC_SIZE; i++) { rtwn_write_1(sc, R92C_DARFRC + i, i + 1); rtwn_write_1(sc, R92C_RARFRC + i, i + 1); } } static void rtwn_scan_start(struct ieee80211com *ic) { struct rtwn_softc *sc = ic->ic_softc; RTWN_LOCK(sc); /* Pause beaconing. */ rtwn_setbits_1(sc, R92C_TXPAUSE, 0, R92C_TX_QUEUE_BCN); /* Receive beacons / probe responses from any BSSID. */ if (sc->bcn_vaps == 0) rtwn_set_rx_bssid_all(sc, 1); RTWN_UNLOCK(sc); } static void rtwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct rtwn_softc *sc = ss->ss_ic->ic_softc; /* Make link LED blink during scan. */ RTWN_LOCK(sc); rtwn_set_led(sc, RTWN_LED_LINK, !sc->ledlink); RTWN_UNLOCK(sc); sc->sc_scan_curchan(ss, maxdwell); } static void rtwn_scan_end(struct ieee80211com *ic) { struct rtwn_softc *sc = ic->ic_softc; RTWN_LOCK(sc); /* Restore limitations. */ if (ic->ic_promisc == 0 && sc->bcn_vaps == 0) rtwn_set_rx_bssid_all(sc, 0); /* Restore LED state. */ rtwn_set_led(sc, RTWN_LED_LINK, (sc->vaps_running != 0)); /* Restore basic rates mask. */ rtwn_calc_basicrates(sc); /* Resume beaconing. */ rtwn_setbits_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_BCN, 0); RTWN_UNLOCK(sc); } static void rtwn_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rtwn_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; int i; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); setbit(bands, IEEE80211_MODE_11NG); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rtwn_chan_2ghz, nitems(rtwn_chan_2ghz), bands, !!(ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40)); /* XXX workaround add_channel_list() limitations */ setbit(bands, IEEE80211_MODE_11A); setbit(bands, IEEE80211_MODE_11NA); for (i = 0; i < nitems(sc->chan_num_5ghz); i++) { if (sc->chan_num_5ghz[i] == 0) continue; ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, sc->chan_list_5ghz[i], sc->chan_num_5ghz[i], bands, !!(ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40)); } } static void rtwn_update_chw(struct ieee80211com *ic) { } static void rtwn_set_channel(struct ieee80211com *ic) { struct rtwn_softc *sc = ic->ic_softc; struct ieee80211_channel *c = ic->ic_curchan; RTWN_LOCK(sc); rtwn_set_chan(sc, c); sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); RTWN_UNLOCK(sc); } static int rtwn_wme_update(struct ieee80211com *ic) { struct ieee80211_channel *c = ic->ic_curchan; struct rtwn_softc *sc = ic->ic_softc; struct wmeParams *wmep = sc->cap_wmeParams; uint8_t aifs, acm, slottime; int ac; /* Prevent possible races. 
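 The WME parameters are snapshotted into cap_wmeParams while both the net80211 lock and the driver lock are held; the driver lock is then re-taken for the actual register programming.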
*/ IEEE80211_LOCK(ic); /* XXX */ RTWN_LOCK(sc); memcpy(wmep, ic->ic_wme.wme_chanParams.cap_wmeParams, sizeof(sc->cap_wmeParams)); RTWN_UNLOCK(sc); IEEE80211_UNLOCK(ic); acm = 0; slottime = IEEE80211_GET_SLOTTIME(ic); RTWN_LOCK(sc); for (ac = WME_AC_BE; ac < WME_NUM_AC; ac++) { /* AIFS[AC] = AIFSN[AC] * aSlotTime + aSIFSTime. */ aifs = wmep[ac].wmep_aifsn * slottime + (IEEE80211_IS_CHAN_5GHZ(c) ? IEEE80211_DUR_OFDM_SIFS : IEEE80211_DUR_SIFS); rtwn_write_4(sc, wme2reg[ac], SM(R92C_EDCA_PARAM_TXOP, wmep[ac].wmep_txopLimit) | SM(R92C_EDCA_PARAM_ECWMIN, wmep[ac].wmep_logcwmin) | SM(R92C_EDCA_PARAM_ECWMAX, wmep[ac].wmep_logcwmax) | SM(R92C_EDCA_PARAM_AIFS, aifs)); if (ac != WME_AC_BE) acm |= wmep[ac].wmep_acm << ac; } if (acm != 0) acm |= R92C_ACMHWCTRL_EN; rtwn_setbits_1(sc, R92C_ACMHWCTRL, R92C_ACMHWCTRL_ACM_MASK, acm); RTWN_UNLOCK(sc); return 0; } static void rtwn_update_slot(struct ieee80211com *ic) { rtwn_cmd_sleepable(ic->ic_softc, NULL, 0, rtwn_update_slot_cb); } static void rtwn_update_slot_cb(struct rtwn_softc *sc, union sec_param *data) { struct ieee80211com *ic = &sc->sc_ic; uint8_t slottime; slottime = IEEE80211_GET_SLOTTIME(ic); RTWN_DPRINTF(sc, RTWN_DEBUG_STATE, "%s: setting slot time to %uus\n", __func__, slottime); rtwn_write_1(sc, R92C_SLOT, slottime); rtwn_update_aifs(sc, slottime); } static void rtwn_update_aifs(struct rtwn_softc *sc, uint8_t slottime) { struct ieee80211_channel *c = sc->sc_ic.ic_curchan; const struct wmeParams *wmep = sc->cap_wmeParams; uint8_t aifs, ac; for (ac = WME_AC_BE; ac < WME_NUM_AC; ac++) { /* AIFS[AC] = AIFSN[AC] * aSlotTime + aSIFSTime. */ aifs = wmep[ac].wmep_aifsn * slottime + (IEEE80211_IS_CHAN_5GHZ(c) ? IEEE80211_DUR_OFDM_SIFS : IEEE80211_DUR_SIFS); rtwn_write_1(sc, wme2reg[ac], aifs); } } static void rtwn_update_promisc(struct ieee80211com *ic) { struct rtwn_softc *sc = ic->ic_softc; RTWN_LOCK(sc); if (sc->sc_flags & RTWN_RUNNING) rtwn_set_promisc(sc); RTWN_UNLOCK(sc); } static void rtwn_update_mcast(struct ieee80211com *ic) { struct rtwn_softc *sc = ic->ic_softc; RTWN_LOCK(sc); if (sc->sc_flags & RTWN_RUNNING) rtwn_set_multi(sc); RTWN_UNLOCK(sc); } static int rtwn_set_bssid(struct rtwn_softc *sc, const uint8_t *bssid, int id) { int error; error = rtwn_write_4(sc, R92C_BSSID(id), le32dec(&bssid[0])); if (error != 0) return (error); error = rtwn_write_2(sc, R92C_BSSID(id) + 4, le16dec(&bssid[4])); return (error); } static int rtwn_set_macaddr(struct rtwn_softc *sc, const uint8_t *addr, int id) { int error; error = rtwn_write_4(sc, R92C_MACID(id), le32dec(&addr[0])); if (error != 0) return (error); error = rtwn_write_2(sc, R92C_MACID(id) + 4, le16dec(&addr[4])); return (error); } static struct ieee80211_node * rtwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rtwn_node *un; un = malloc(sizeof (struct rtwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (un == NULL) return NULL; un->id = RTWN_MACID_UNDEFINED; un->avg_pwdb = -1; return &un->ni; } static void rtwn_newassoc(struct ieee80211_node *ni, int isnew) { struct rtwn_softc *sc = ni->ni_ic->ic_softc; struct rtwn_node *un = RTWN_NODE(ni); int id; if (!isnew) return; RTWN_NT_LOCK(sc); for (id = 0; id <= sc->macid_limit; id++) { if (id != RTWN_MACID_BC && sc->node_list[id] == NULL) { un->id = id; sc->node_list[id] = ni; break; } } RTWN_NT_UNLOCK(sc); if (id > sc->macid_limit) { device_printf(sc->sc_dev, "%s: node table is full\n", __func__); return; } #ifndef RTWN_WITHOUT_UCODE /* Notify firmware. 
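 about the new station: the id is tagged with RTWN_MACID_VALID and passed to rtwn_set_media_status() through the sleepable command queue.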
*/ id |= RTWN_MACID_VALID; rtwn_cmd_sleepable(sc, &id, sizeof(id), rtwn_set_media_status); #endif } static void rtwn_node_free(struct ieee80211_node *ni) { struct rtwn_softc *sc = ni->ni_ic->ic_softc; struct rtwn_node *un = RTWN_NODE(ni); RTWN_NT_LOCK(sc); if (un->id != RTWN_MACID_UNDEFINED) { sc->node_list[un->id] = NULL; #ifndef RTWN_WITHOUT_UCODE rtwn_cmd_sleepable(sc, &un->id, sizeof(un->id), rtwn_set_media_status); #endif } RTWN_NT_UNLOCK(sc); sc->sc_node_free(ni); } static void rtwn_init_beacon_reg(struct rtwn_softc *sc) { rtwn_write_1(sc, R92C_BCN_CTRL(0), R92C_BCN_CTRL_DIS_TSF_UDT0); rtwn_write_1(sc, R92C_BCN_CTRL(1), R92C_BCN_CTRL_DIS_TSF_UDT0); rtwn_write_2(sc, R92C_TBTT_PROHIBIT, 0x6404); rtwn_write_1(sc, R92C_DRVERLYINT, 0x05); rtwn_write_1(sc, R92C_BCNDMATIM, 0x02); rtwn_write_2(sc, R92C_BCNTCFG, 0x660f); } static int rtwn_init(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; int i, error; RTWN_LOCK(sc); if (sc->sc_flags & RTWN_RUNNING) { RTWN_UNLOCK(sc); return (0); } sc->sc_flags |= RTWN_STARTED; /* Power on adapter. */ error = rtwn_power_on(sc); if (error != 0) goto fail; #ifndef RTWN_WITHOUT_UCODE /* Load 8051 microcode. */ error = rtwn_load_firmware(sc); if (error == 0) sc->sc_flags |= RTWN_FW_LOADED; /* Init firmware commands ring. */ sc->fwcur = 0; #endif /* Initialize MAC block. */ error = rtwn_mac_init(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: error while initializing MAC block\n", __func__); goto fail; } /* Initialize DMA. */ error = rtwn_dma_init(sc); if (error != 0) goto fail; /* Drop incorrect TX (USB). */ rtwn_drop_incorrect_tx(sc); /* Set info size in Rx descriptors (in 64-bit words). */ rtwn_write_1(sc, R92C_RX_DRVINFO_SZ, R92C_RX_DRVINFO_SZ_DEF); /* Init interrupts. */ rtwn_init_intr(sc); for (i = 0; i < nitems(sc->vaps); i++) { struct rtwn_vap *uvp = sc->vaps[i]; /* Set initial network type. */ rtwn_set_mode(sc, R92C_MSR_NOLINK, i); if (uvp == NULL) continue; /* Set MAC address. */ error = rtwn_set_macaddr(sc, uvp->vap.iv_myaddr, uvp->id); if (error != 0) goto fail; } /* Initialize Rx filter. */ rtwn_rxfilter_init(sc); /* Set short/long retry limits. */ rtwn_write_2(sc, R92C_RL, SM(R92C_RL_SRL, 0x30) | SM(R92C_RL_LRL, 0x30)); /* Initialize EDCA parameters. */ rtwn_init_edca(sc); rtwn_setbits_1(sc, R92C_FWHW_TXQ_CTRL, 0, R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW); /* Set ACK timeout. */ rtwn_write_1(sc, R92C_ACKTO, sc->ackto); /* Setup aggregation. */ /* Tx aggregation. */ rtwn_init_tx_agg(sc); rtwn_init_rx_agg(sc); /* Initialize beacon parameters. */ rtwn_init_beacon_reg(sc); /* Init A-MPDU parameters. */ rtwn_init_ampdu(sc); /* Init MACTXEN / MACRXEN after setting RxFF boundary. */ rtwn_setbits_1(sc, R92C_CR, 0, R92C_CR_MACTXEN | R92C_CR_MACRXEN); /* Initialize BB/RF blocks. */ rtwn_init_bb(sc); rtwn_init_rf(sc); /* Initialize wireless band. */ rtwn_set_chan(sc, ic->ic_curchan); /* Clear per-station keys table. */ rtwn_init_cam(sc); /* Enable decryption / encryption. */ rtwn_init_seccfg(sc); /* Install static keys (if any). */ for (i = 0; i < nitems(sc->vaps); i++) { if (sc->vaps[i] != NULL) { error = rtwn_init_static_keys(sc, sc->vaps[i]); if (error != 0) goto fail; } } /* Initialize antenna selection. */ rtwn_init_antsel(sc); /* Enable hardware sequence numbering. */ rtwn_write_1(sc, R92C_HWSEQ_CTRL, R92C_TX_QUEUE_ALL); /* Disable BAR. */ rtwn_write_4(sc, R92C_BAR_MODE_CTRL, 0x0201ffff); /* NAV limit. */ rtwn_write_1(sc, R92C_NAV_UPPER, 0); /* Initialize GPIO setting. 
*/ rtwn_setbits_1(sc, R92C_GPIO_MUXCFG, R92C_GPIO_MUXCFG_ENBT, 0); /* Initialize MRR. */ rtwn_mrr_init(sc); /* Device-specific post initialization. */ rtwn_post_init(sc); rtwn_start_xfers(sc); #ifndef D4054 callout_reset(&sc->sc_watchdog_to, hz, rtwn_watchdog, sc); #endif sc->sc_flags |= RTWN_RUNNING; fail: RTWN_UNLOCK(sc); return (error); } static void rtwn_stop(struct rtwn_softc *sc) { RTWN_LOCK(sc); if (!(sc->sc_flags & RTWN_STARTED)) { RTWN_UNLOCK(sc); return; } #ifndef D4054 callout_stop(&sc->sc_watchdog_to); sc->sc_tx_timer = 0; #endif sc->sc_flags &= ~(RTWN_STARTED | RTWN_RUNNING | RTWN_FW_LOADED); sc->sc_flags &= ~RTWN_TEMP_MEASURED; sc->fwver = 0; sc->thcal_temp = 0; sc->cur_bcnq_id = RTWN_VAP_ID_INVALID; bzero(&sc->last_physt, sizeof(sc->last_physt)); #ifdef D4054 ieee80211_tx_watchdog_stop(&sc->sc_ic); #endif rtwn_abort_xfers(sc); rtwn_drain_mbufq(sc); rtwn_power_off(sc); rtwn_reset_lists(sc, NULL); RTWN_UNLOCK(sc); } MODULE_VERSION(rtwn, 2); MODULE_DEPEND(rtwn, wlan, 1, 1, 1); #ifndef RTWN_WITHOUT_UCODE MODULE_DEPEND(rtwn, firmware, 1, 1, 1); #endif Index: projects/clang400-import/sys/dev/rtwn/if_rtwnvar.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/if_rtwnvar.h (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/if_rtwnvar.h (revision 312720) @@ -1,636 +1,639 @@ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $OpenBSD: if_urtwnreg.h,v 1.3 2010/11/16 18:02:59 damien Exp $ * $FreeBSD$ */ #ifndef IF_RTWNVAR_H #define IF_RTWNVAR_H #include "opt_rtwn.h" #define RTWN_TX_DESC_SIZE 64 #define RTWN_TXBUFSZ (16 * 1024) #define RTWN_BCN_MAX_SIZE 512 #define RTWN_CAM_ENTRY_LIMIT 64 #define RTWN_MACID_BC 1 /* Broadcast. 
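*/

/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * when a station id is handed to the firmware (see rtwn_newassoc() and
 * rtwn_node_free()), bit 15 doubles as an "entry is valid" flag while
 * the low bits carry the id itself:
 */
static __inline uint16_t
rtwn_macid_encode_example(uint16_t sta_id, int valid)
{
	/* Low 15 bits: station id; bit 15: RTWN_MACID_VALID. */
	return ((sta_id & 0x7fff) | (valid ? 0x8000 : 0));
}

/* Special station id values.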
*/ #define RTWN_MACID_UNDEFINED 0x7fff #define RTWN_MACID_VALID 0x8000 #define RTWN_MACID_LIMIT 128 #define RTWN_TX_TIMEOUT 5000 /* ms */ #define RTWN_MAX_EPOUT 4 #define RTWN_PORT_COUNT 2 #define RTWN_LED_LINK 0 #define RTWN_LED_DATA 1 struct rtwn_rx_radiotap_header { struct ieee80211_radiotap_header wr_ihdr; uint64_t wr_tsft; uint8_t wr_flags; uint8_t wr_rate; uint16_t wr_chan_freq; uint16_t wr_chan_flags; int8_t wr_dbm_antsignal; int8_t wr_dbm_antnoise; } __packed __aligned(8); #define RTWN_RX_RADIOTAP_PRESENT \ (1 << IEEE80211_RADIOTAP_TSFT | \ 1 << IEEE80211_RADIOTAP_FLAGS | \ 1 << IEEE80211_RADIOTAP_RATE | \ 1 << IEEE80211_RADIOTAP_CHANNEL | \ 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL | \ 1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) struct rtwn_tx_radiotap_header { struct ieee80211_radiotap_header wt_ihdr; uint8_t wt_flags; uint16_t wt_chan_freq; uint16_t wt_chan_flags; } __packed __aligned(8); #define RTWN_TX_RADIOTAP_PRESENT \ (1 << IEEE80211_RADIOTAP_FLAGS | \ 1 << IEEE80211_RADIOTAP_CHANNEL) struct rtwn_tx_buf { uint8_t txd[RTWN_TX_DESC_SIZE]; } __attribute__((aligned(4))); #define RTWN_PHY_STATUS_SIZE 32 struct rtwn_tx_phystat { uint32_t phydw[RTWN_PHY_STATUS_SIZE / sizeof(uint32_t)]; }; struct rtwn_softc; union sec_param { struct ieee80211_key key; int macid; }; #define CMD_FUNC_PROTO void (*func)(struct rtwn_softc *, \ union sec_param *) struct rtwn_cmdq { union sec_param data; CMD_FUNC_PROTO; }; #define RTWN_CMDQ_SIZE 16 struct rtwn_node { struct ieee80211_node ni; /* must be the first */ int id; struct rtwn_tx_phystat last_physt; int avg_pwdb; }; #define RTWN_NODE(ni) ((struct rtwn_node *)(ni)) struct rtwn_vap { struct ieee80211vap vap; int id; #define RTWN_VAP_ID_INVALID -1 int curr_mode; struct rtwn_tx_buf bcn_desc; struct mbuf *bcn_mbuf; struct timeout_task tx_beacon_csa; struct callout tsf_sync_adhoc; struct task tsf_sync_adhoc_task; const struct ieee80211_key *keys[IEEE80211_WEP_NKID]; int (*newstate)(struct ieee80211vap *, enum ieee80211_state, int); void (*recv_mgmt)(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); }; #define RTWN_VAP(vap) ((struct rtwn_vap *)(vap)) /* * Rx data types. */ enum { RTWN_RX_DATA, RTWN_RX_TX_REPORT, RTWN_RX_OTHER }; /* * Firmware reset reasons. */ enum { RTWN_FW_RESET_DOWNLOAD, RTWN_FW_RESET_CHECKSUM, RTWN_FW_RESET_SHUTDOWN }; /* * Rate control algorithm selection. */ enum { RTWN_RATECTL_NONE, RTWN_RATECTL_NET80211, RTWN_RATECTL_FW, RTWN_RATECTL_MAX }; /* * Control h/w crypto usage. 
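 * The *_MAX members of this and the previous enum exist so that
 * sysctl-supplied values can be range-checked before use; an
 * illustrative sketch (hypothetical helper, not part of the driver):
 */
static __inline int
rtwn_ratectl_valid_example(int algo)
{
	/* Accept only algorithms declared in the rate control enum. */
	return (algo >= RTWN_RATECTL_NONE && algo < RTWN_RATECTL_MAX);
}

/*
 * Control h/w crypto usage.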
*/ enum { RTWN_CRYPTO_SW, RTWN_CRYPTO_PAIR, RTWN_CRYPTO_FULL, RTWN_CRYPTO_MAX, }; struct rtwn_softc { struct ieee80211com sc_ic; struct mbufq sc_snd; device_t sc_dev; #if 1 int sc_ht40; #endif uint32_t sc_debug; int sc_hwcrypto; int sc_ratectl_sysctl; int sc_ratectl; uint8_t sc_detached; uint8_t sc_flags; /* Device flags */ #define RTWN_FLAG_CCK_HIPWR 0x01 #define RTWN_FLAG_EXT_HDR 0x02 #define RTWN_FLAG_CAM_FIXED 0x04 /* Driver state */ #define RTWN_STARTED 0x08 #define RTWN_RUNNING 0x10 #define RTWN_FW_LOADED 0x20 #define RTWN_TEMP_MEASURED 0x40 #define RTWN_RCR_LOCKED 0x80 #define RTWN_CHIP_HAS_BCNQ1(_sc) \ ((_sc)->bcn_status_reg[0] != (_sc)->bcn_status_reg[1]) void *sc_priv; const char *name; int sc_ant; struct rtwn_tx_phystat last_physt; uint8_t thcal_temp; int cur_bcnq_id; int nvaps; int ap_vaps; int bcn_vaps; int mon_vaps; int vaps_running; int monvaps_running; uint16_t next_rom_addr; uint8_t keys_bmap[howmany(RTWN_CAM_ENTRY_LIMIT, NBBY)]; struct rtwn_vap *vaps[RTWN_PORT_COUNT]; struct ieee80211_node *node_list[RTWN_MACID_LIMIT]; struct mtx nt_mtx; struct callout sc_calib_to; struct callout sc_pwrmode_init; #ifndef D4054 struct callout sc_watchdog_to; int sc_tx_timer; #endif struct mtx sc_mtx; struct rtwn_cmdq cmdq[RTWN_CMDQ_SIZE]; struct mtx cmdq_mtx; struct task cmdq_task; uint8_t cmdq_first; uint8_t cmdq_last; struct wmeParams cap_wmeParams[WME_NUM_AC]; struct rtwn_rx_radiotap_header sc_rxtap; struct rtwn_tx_radiotap_header sc_txtap; int ntxchains; int nrxchains; int ledlink; uint8_t thermal_meter; int sc_tx_n_active; uint8_t qfullmsk; /* Firmware-specific */ const char *fwname; uint16_t fwver; uint16_t fwsig; int fwcur; void (*sc_node_free)(struct ieee80211_node *); void (*sc_scan_curchan)(struct ieee80211_scan_state *, unsigned long); /* Interface-specific. */ int (*sc_write_1)(struct rtwn_softc *, uint16_t, uint8_t); int (*sc_write_2)(struct rtwn_softc *, uint16_t, uint16_t); int (*sc_write_4)(struct rtwn_softc *, uint16_t, uint32_t); uint8_t (*sc_read_1)(struct rtwn_softc *, uint16_t); uint16_t (*sc_read_2)(struct rtwn_softc *, uint16_t); uint32_t (*sc_read_4)(struct rtwn_softc *, uint16_t); /* XXX eliminate */ void (*sc_delay)(struct rtwn_softc *, int); int (*sc_tx_start)(struct rtwn_softc *, struct ieee80211_node *, struct mbuf *, uint8_t *, uint8_t, int); void (*sc_start_xfers)(struct rtwn_softc *); void (*sc_reset_lists)(struct rtwn_softc *, struct ieee80211vap *); void (*sc_abort_xfers)(struct rtwn_softc *); int (*sc_fw_write_block)(struct rtwn_softc *, const uint8_t *, uint16_t, int); uint16_t (*sc_get_qmap)(struct rtwn_softc *); void (*sc_set_desc_addr)(struct rtwn_softc *); void (*sc_drop_incorrect_tx)(struct rtwn_softc *); void (*sc_beacon_update_begin)(struct rtwn_softc *, struct ieee80211vap *); void (*sc_beacon_update_end)(struct rtwn_softc *, struct ieee80211vap *); void (*sc_beacon_unload)(struct rtwn_softc *, int); /* XXX drop checks for PCIe? */ int bcn_check_interval; /* Device-specific. 
*/ uint32_t (*sc_rf_read)(struct rtwn_softc *, int, uint8_t); void (*sc_rf_write)(struct rtwn_softc *, int, uint8_t, uint32_t); int (*sc_check_condition)(struct rtwn_softc *, const uint8_t[]); void (*sc_efuse_postread)(struct rtwn_softc *); void (*sc_parse_rom)(struct rtwn_softc *, uint8_t *); void (*sc_set_led)(struct rtwn_softc *, int, int); int (*sc_power_on)(struct rtwn_softc *); void (*sc_power_off)(struct rtwn_softc *); #ifndef RTWN_WITHOUT_UCODE void (*sc_fw_reset)(struct rtwn_softc *, int); void (*sc_fw_download_enable)(struct rtwn_softc *, int); #endif + int (*sc_llt_init)(struct rtwn_softc *); int (*sc_set_page_size)(struct rtwn_softc *); void (*sc_lc_calib)(struct rtwn_softc *); void (*sc_iq_calib)(struct rtwn_softc *); void (*sc_read_chipid_vendor)(struct rtwn_softc *, uint32_t); void (*sc_adj_devcaps)(struct rtwn_softc *); void (*sc_vap_preattach)(struct rtwn_softc *, struct ieee80211vap *); void (*sc_postattach)(struct rtwn_softc *); void (*sc_detach_private)(struct rtwn_softc *); void (*sc_fill_tx_desc)(struct rtwn_softc *, struct ieee80211_node *, struct mbuf *, void *, uint8_t, int); void (*sc_fill_tx_desc_raw)(struct rtwn_softc *, struct ieee80211_node *, struct mbuf *, void *, const struct ieee80211_bpf_params *); void (*sc_fill_tx_desc_null)(struct rtwn_softc *, void *, int, int, int); void (*sc_dump_tx_desc)(struct rtwn_softc *, const void *); uint8_t (*sc_tx_radiotap_flags)(const void *); uint8_t (*sc_rx_radiotap_flags)(const void *); void (*sc_beacon_init)(struct rtwn_softc *, void *, int); void (*sc_beacon_enable)(struct rtwn_softc *, int, int); void (*sc_beacon_set_rate)(void *, int); void (*sc_beacon_select)(struct rtwn_softc *, int); void (*sc_set_chan)(struct rtwn_softc *, struct ieee80211_channel *); void (*sc_set_media_status)(struct rtwn_softc *, int); #ifndef RTWN_WITHOUT_UCODE int (*sc_set_rsvd_page)(struct rtwn_softc *, int, int, int); int (*sc_set_pwrmode)(struct rtwn_softc *, struct ieee80211vap *, int); void (*sc_set_rssi)(struct rtwn_softc *); #endif void (*sc_get_rx_stats)(struct rtwn_softc *, struct ieee80211_rx_stats *, const void *, const void *); int8_t (*sc_get_rssi_cck)(struct rtwn_softc *, void *); int8_t (*sc_get_rssi_ofdm)(struct rtwn_softc *, void *); int (*sc_classify_intr)(struct rtwn_softc *, void *, int); void (*sc_handle_tx_report)(struct rtwn_softc *, uint8_t *, int); void (*sc_handle_c2h_report)(struct rtwn_softc *, uint8_t *, int); int (*sc_check_frame)(struct rtwn_softc *, struct mbuf *); void (*sc_temp_measure)(struct rtwn_softc *); uint8_t (*sc_temp_read)(struct rtwn_softc *); void (*sc_init_tx_agg)(struct rtwn_softc *); void (*sc_init_rx_agg)(struct rtwn_softc *); void (*sc_init_intr)(struct rtwn_softc *); void (*sc_init_ampdu)(struct rtwn_softc *); void (*sc_init_edca)(struct rtwn_softc *); void (*sc_init_bb)(struct rtwn_softc *); void (*sc_init_rf)(struct rtwn_softc *); void (*sc_init_antsel)(struct rtwn_softc *); void (*sc_post_init)(struct rtwn_softc *); int (*sc_init_bcnq1_boundary)(struct rtwn_softc *); const uint8_t *chan_list_5ghz[3]; int chan_num_5ghz[3]; const struct rtwn_mac_prog *mac_prog; int mac_size; const struct rtwn_bb_prog *bb_prog; int bb_size; const struct rtwn_agc_prog *agc_prog; int agc_size; const struct rtwn_rf_prog *rf_prog; int page_count; int pktbuf_count; int ackto; int npubqpages; int nhqpages; int nnqpages; int nlqpages; int page_size; int txdesc_len; int efuse_maxlen; int efuse_maplen; uint16_t rx_dma_size; int macid_limit; int cam_entry_limit; int fwsize_limit; int temp_delta; uint16_t 
bcn_status_reg[RTWN_PORT_COUNT]; uint32_t rcr; /* Rx filter */ }; MALLOC_DECLARE(M_RTWN_PRIV); #define RTWN_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define RTWN_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define RTWN_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED) #define RTWN_CMDQ_LOCK_INIT(sc) \ mtx_init(&(sc)->cmdq_mtx, "cmdq lock", NULL, MTX_DEF) #define RTWN_CMDQ_LOCK(sc) mtx_lock(&(sc)->cmdq_mtx) #define RTWN_CMDQ_UNLOCK(sc) mtx_unlock(&(sc)->cmdq_mtx) #define RTWN_CMDQ_LOCK_INITIALIZED(sc) mtx_initialized(&(sc)->cmdq_mtx) #define RTWN_CMDQ_LOCK_DESTROY(sc) mtx_destroy(&(sc)->cmdq_mtx) #define RTWN_NT_LOCK_INIT(sc) \ mtx_init(&(sc)->nt_mtx, "node table lock", NULL, MTX_DEF) #define RTWN_NT_LOCK(sc) mtx_lock(&(sc)->nt_mtx) #define RTWN_NT_UNLOCK(sc) mtx_unlock(&(sc)->nt_mtx) #define RTWN_NT_LOCK_INITIALIZED(sc) mtx_initialized(&(sc)->nt_mtx) #define RTWN_NT_LOCK_DESTROY(sc) mtx_destroy(&(sc)->nt_mtx) void rtwn_sysctlattach(struct rtwn_softc *); int rtwn_attach(struct rtwn_softc *); void rtwn_detach(struct rtwn_softc *); void rtwn_resume(struct rtwn_softc *); void rtwn_suspend(struct rtwn_softc *); /* Interface-specific. */ #define rtwn_write_1(_sc, _addr, _val) \ (((_sc)->sc_write_1)((_sc), (_addr), (_val))) #define rtwn_write_2(_sc, _addr, _val) \ (((_sc)->sc_write_2)((_sc), (_addr), (_val))) #define rtwn_write_4(_sc, _addr, _val) \ (((_sc)->sc_write_4)((_sc), (_addr), (_val))) #define rtwn_read_1(_sc, _addr) \ (((_sc)->sc_read_1)((_sc), (_addr))) #define rtwn_read_2(_sc, _addr) \ (((_sc)->sc_read_2)((_sc), (_addr))) #define rtwn_read_4(_sc, _addr) \ (((_sc)->sc_read_4)((_sc), (_addr))) #define rtwn_delay(_sc, _usec) \ (((_sc)->sc_delay)((_sc), (_usec))) #define rtwn_tx_start(_sc, _ni, _m, _desc, _type, _id) \ (((_sc)->sc_tx_start)((_sc), (_ni), (_m), (_desc), (_type), (_id))) #define rtwn_start_xfers(_sc) \ (((_sc)->sc_start_xfers)((_sc))) #define rtwn_reset_lists(_sc, _vap) \ (((_sc)->sc_reset_lists)((_sc), (_vap))) #define rtwn_abort_xfers(_sc) \ (((_sc)->sc_abort_xfers)((_sc))) #define rtwn_fw_write_block(_sc, _buf, _reg, _len) \ (((_sc)->sc_fw_write_block)((_sc), (_buf), (_reg), (_len))) #define rtwn_get_qmap(_sc) \ (((_sc)->sc_get_qmap)((_sc))) #define rtwn_set_desc_addr(_sc) \ (((_sc)->sc_set_desc_addr)((_sc))) #define rtwn_drop_incorrect_tx(_sc) \ (((_sc)->sc_drop_incorrect_tx)((_sc))) #define rtwn_beacon_update_begin(_sc, _vap) \ (((_sc)->sc_beacon_update_begin)((_sc), (_vap))) #define rtwn_beacon_update_end(_sc, _vap) \ (((_sc)->sc_beacon_update_end)((_sc), (_vap))) #define rtwn_beacon_unload(_sc, _id) \ (((_sc)->sc_beacon_unload)((_sc), (_id))) /* Aliases. */ #define rtwn_bb_write rtwn_write_4 -#define rtwn_bb_read rtwn_read_4 -#define rtwn_bb_setbits rtwn_setbits_4 +#define rtwn_bb_read rtwn_read_4 +#define rtwn_bb_setbits rtwn_setbits_4 /* Device-specific. 
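*/

/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * each macro in the group below dispatches through the matching
 * function pointer in struct rtwn_softc, with the implementation
 * selected by the bus- and chip-specific attach code (r88eu_attach(),
 * r92ce_attach(), ...). For example, an open-coded equivalent of the
 * rtwn_rf_read() macro:
 */
static __inline uint32_t
rtwn_rf_read_example(struct rtwn_softc *sc, int chain, uint8_t addr)
{
	/* Same dispatch as the rtwn_rf_read() macro defined below. */
	return ((sc->sc_rf_read)(sc, chain, addr));
}

/* Device-specific.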
*/ #define rtwn_rf_read(_sc, _chain, _addr) \ (((_sc)->sc_rf_read)((_sc), (_chain), (_addr))) #define rtwn_rf_write(_sc, _chain, _addr, _val) \ (((_sc)->sc_rf_write)((_sc), (_chain), (_addr), (_val))) #define rtwn_check_condition(_sc, _cond) \ (((_sc)->sc_check_condition)((_sc), (_cond))) #define rtwn_efuse_postread(_sc) \ (((_sc)->sc_efuse_postread)((_sc))) #define rtwn_parse_rom(_sc, _rom) \ (((_sc)->sc_parse_rom)((_sc), (_rom))) #define rtwn_set_led(_sc, _led, _on) \ (((_sc)->sc_set_led)((_sc), (_led), (_on))) #define rtwn_get_rx_stats(_sc, _rxs, _desc, _physt) \ (((_sc)->sc_get_rx_stats((_sc), (_rxs), (_desc), (_physt)))) #define rtwn_get_rssi_cck(_sc, _physt) \ (((_sc)->sc_get_rssi_cck)((_sc), (_physt))) #define rtwn_get_rssi_ofdm(_sc, _physt) \ (((_sc)->sc_get_rssi_ofdm)((_sc), (_physt))) #define rtwn_power_on(_sc) \ (((_sc)->sc_power_on)((_sc))) #define rtwn_power_off(_sc) \ (((_sc)->sc_power_off)((_sc))) #ifndef RTWN_WITHOUT_UCODE #define rtwn_fw_reset(_sc, _reason) \ (((_sc)->sc_fw_reset)((_sc), (_reason))) #define rtwn_fw_download_enable(_sc, _enable) \ (((_sc)->sc_fw_download_enable)((_sc), (_enable))) #endif +#define rtwn_llt_init(_sc) \ + (((_sc)->sc_llt_init)((_sc))) #define rtwn_set_page_size(_sc) \ (((_sc)->sc_set_page_size)((_sc))) #define rtwn_lc_calib(_sc) \ (((_sc)->sc_lc_calib)((_sc))) #define rtwn_iq_calib(_sc) \ (((_sc)->sc_iq_calib)((_sc))) #define rtwn_read_chipid_vendor(_sc, _reg) \ (((_sc)->sc_read_chipid_vendor)((_sc), (_reg))) #define rtwn_adj_devcaps(_sc) \ (((_sc)->sc_adj_devcaps)((_sc))) #define rtwn_vap_preattach(_sc, _vap) \ (((_sc)->sc_vap_preattach)((_sc), (_vap))) #define rtwn_postattach(_sc) \ (((_sc)->sc_postattach)((_sc))) #define rtwn_detach_private(_sc) \ (((_sc)->sc_detach_private)((_sc))) #define rtwn_fill_tx_desc(_sc, _ni, _m, \ _buf, _ridx, _maxretry) \ (((_sc)->sc_fill_tx_desc)((_sc), (_ni), \ (_m), (_buf), (_ridx), (_maxretry))) #define rtwn_fill_tx_desc_raw(_sc, _ni, _m, \ _buf, _params) \ (((_sc)->sc_fill_tx_desc_raw)((_sc), (_ni), \ (_m), (_buf), (_params))) #define rtwn_fill_tx_desc_null(_sc, _buf, _11b, _qos, _id) \ (((_sc)->sc_fill_tx_desc_null)((_sc), \ (_buf), (_11b), (_qos), (_id))) #define rtwn_dump_tx_desc(_sc, _desc) \ (((_sc)->sc_dump_tx_desc)((_sc), (_desc))) #define rtwn_tx_radiotap_flags(_sc, _buf) \ (((_sc)->sc_tx_radiotap_flags)((_buf))) #define rtwn_rx_radiotap_flags(_sc, _buf) \ (((_sc)->sc_rx_radiotap_flags)((_buf))) #define rtwn_set_chan(_sc, _c) \ (((_sc)->sc_set_chan)((_sc), (_c))) #ifndef RTWN_WITHOUT_UCODE #define rtwn_set_rsvd_page(_sc, _resp, _null, _qos_null) \ (((_sc)->sc_set_rsvd_page)((_sc), \ (_resp), (_null), (_qos_null))) #define rtwn_set_pwrmode(_sc, _vap, _off) \ (((_sc)->sc_set_pwrmode)((_sc), (_vap), (_off))) #define rtwn_set_rssi(_sc) \ (((_sc)->sc_set_rssi)((_sc))) #endif #define rtwn_classify_intr(_sc, _buf, _len) \ (((_sc)->sc_classify_intr)((_sc), (_buf), (_len))) #define rtwn_handle_tx_report(_sc, _buf, _len) \ (((_sc)->sc_handle_tx_report)((_sc), (_buf), (_len))) #define rtwn_handle_c2h_report(_sc, _buf, _len) \ (((_sc)->sc_handle_c2h_report)((_sc), (_buf), (_len))) #define rtwn_check_frame(_sc, _m) \ (((_sc)->sc_check_frame)((_sc), (_m))) #define rtwn_beacon_init(_sc, _buf, _id) \ (((_sc)->sc_beacon_init)((_sc), (_buf), (_id))) #define rtwn_beacon_enable(_sc, _id, _enable) \ (((_sc)->sc_beacon_enable)((_sc), (_id), (_enable))) #define rtwn_beacon_set_rate(_sc, _buf, _is5ghz) \ (((_sc)->sc_beacon_set_rate)((_buf), (_is5ghz))) #define rtwn_beacon_select(_sc, _id) \ (((_sc)->sc_beacon_select)((_sc), 
(_id))) #define rtwn_temp_measure(_sc) \ (((_sc)->sc_temp_measure)((_sc))) #define rtwn_temp_read(_sc) \ (((_sc)->sc_temp_read)((_sc))) #define rtwn_init_tx_agg(_sc) \ (((_sc)->sc_init_tx_agg)((_sc))) #define rtwn_init_rx_agg(_sc) \ (((_sc)->sc_init_rx_agg)((_sc))) #define rtwn_init_intr(_sc) \ (((_sc)->sc_init_intr)((_sc))) #define rtwn_init_ampdu(_sc) \ (((_sc)->sc_init_ampdu)((_sc))) #define rtwn_init_edca(_sc) \ (((_sc)->sc_init_edca)((_sc))) #define rtwn_init_bb(_sc) \ (((_sc)->sc_init_bb)((_sc))) #define rtwn_init_rf(_sc) \ (((_sc)->sc_init_rf)((_sc))) #define rtwn_init_antsel(_sc) \ (((_sc)->sc_init_antsel)((_sc))) #define rtwn_post_init(_sc) \ (((_sc)->sc_post_init)((_sc))) #define rtwn_init_bcnq1_boundary(_sc) \ (((_sc)->sc_init_bcnq1_boundary)((_sc))) /* * Methods to access subfields in registers. */ static __inline int rtwn_setbits_1(struct rtwn_softc *sc, uint16_t addr, uint8_t clr, uint8_t set) { return (rtwn_write_1(sc, addr, (rtwn_read_1(sc, addr) & ~clr) | set)); } static __inline int rtwn_setbits_1_shift(struct rtwn_softc *sc, uint16_t addr, uint32_t clr, uint32_t set, int shift) { return (rtwn_setbits_1(sc, addr + shift, clr >> shift * NBBY, set >> shift * NBBY)); } static __inline int rtwn_setbits_2(struct rtwn_softc *sc, uint16_t addr, uint16_t clr, uint16_t set) { return (rtwn_write_2(sc, addr, (rtwn_read_2(sc, addr) & ~clr) | set)); } static __inline int rtwn_setbits_4(struct rtwn_softc *sc, uint16_t addr, uint32_t clr, uint32_t set) { return (rtwn_write_4(sc, addr, (rtwn_read_4(sc, addr) & ~clr) | set)); } static __inline void rtwn_rf_setbits(struct rtwn_softc *sc, int chain, uint8_t addr, uint32_t clr, uint32_t set) { rtwn_rf_write(sc, chain, addr, (rtwn_rf_read(sc, chain, addr) & ~clr) | set); } #endif /* IF_RTWNVAR_H */ Index: projects/clang400-import/sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c (revision 312720) @@ -1,217 +1,218 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2014 Kevin Lo * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct rtwn_r88e_txpwr r88e_txpwr; void r88eu_attach(struct rtwn_usb_softc *); static void r88e_postattach(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; struct ieee80211com *ic = &sc->sc_ic; rs->rs_scan_start = ic->ic_scan_start; ic->ic_scan_start = r92c_scan_start; rs->rs_scan_end = ic->ic_scan_end; ic->ic_scan_end = r92c_scan_end; } static void r88eu_attach_private(struct rtwn_softc *sc) { struct r92c_softc *rs; rs = malloc(sizeof(struct r92c_softc), M_RTWN_PRIV, M_WAITOK | M_ZERO); rs->rs_txpwr = &r88e_txpwr; rs->rs_txagc = &r88e_txagc; rs->rs_set_bw20 = r88e_set_bw20; rs->rs_get_txpower = r88e_get_txpower; rs->rs_set_gain = r88e_set_gain; rs->rs_tx_enable_ampdu = r88e_tx_enable_ampdu; rs->rs_tx_setup_hwseq = r88e_tx_setup_hwseq; rs->rs_tx_setup_macid = r88e_tx_setup_macid; rs->rs_set_name = rtwn_nop_softc; /* not used */ rs->rf_read_delay[0] = 10; rs->rf_read_delay[1] = 100; rs->rf_read_delay[2] = 10; sc->sc_priv = rs; } static void r88eu_adj_devcaps(struct rtwn_softc *sc) { /* XXX TODO? */ } void r88eu_attach(struct rtwn_usb_softc *uc) { struct rtwn_softc *sc = &uc->uc_sc; /* USB part. */ uc->uc_align_rx = r92cu_align_rx; uc->tx_agg_desc_num = 6; /* Common part. */ sc->sc_flags = RTWN_FLAG_EXT_HDR; sc->sc_set_chan = r92c_set_chan; sc->sc_fill_tx_desc = r92c_fill_tx_desc; sc->sc_fill_tx_desc_raw = r92c_fill_tx_desc_raw; sc->sc_fill_tx_desc_null = r92c_fill_tx_desc_null; sc->sc_dump_tx_desc = r92cu_dump_tx_desc; sc->sc_tx_radiotap_flags = r92c_tx_radiotap_flags; sc->sc_rx_radiotap_flags = r92c_rx_radiotap_flags; sc->sc_get_rx_stats = r88e_get_rx_stats; sc->sc_get_rssi_cck = r88e_get_rssi_cck; sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm; sc->sc_classify_intr = r88eu_classify_intr; sc->sc_handle_tx_report = r88e_ratectl_tx_complete; sc->sc_handle_c2h_report = r88e_handle_c2h_report; sc->sc_check_frame = rtwn_nop_int_softc_mbuf; sc->sc_rf_read = r92c_rf_read; sc->sc_rf_write = r88e_rf_write; sc->sc_check_condition = r92c_check_condition; sc->sc_efuse_postread = rtwn_nop_softc; sc->sc_parse_rom = r88e_parse_rom; sc->sc_set_led = r88e_set_led; sc->sc_power_on = r88e_power_on; sc->sc_power_off = r88eu_power_off; #ifndef RTWN_WITHOUT_UCODE sc->sc_fw_reset = r88e_fw_reset; sc->sc_fw_download_enable = r88e_fw_download_enable; #endif + sc->sc_llt_init = r92c_llt_init; sc->sc_set_page_size = r92c_set_page_size; sc->sc_lc_calib = r92c_lc_calib; sc->sc_iq_calib = r88e_iq_calib; /* XXX TODO */ sc->sc_read_chipid_vendor = rtwn_nop_softc_uint32; sc->sc_adj_devcaps = r88eu_adj_devcaps; sc->sc_vap_preattach = rtwn_nop_softc_vap; sc->sc_postattach = r88e_postattach; sc->sc_detach_private = r92c_detach_private; sc->sc_set_media_status = r88e_set_media_status; #ifndef RTWN_WITHOUT_UCODE sc->sc_set_rsvd_page = r88e_set_rsvd_page; sc->sc_set_pwrmode = r88e_set_pwrmode; sc->sc_set_rssi = rtwn_nop_softc; /* XXX TODO? 
*/ #endif sc->sc_beacon_init = r92c_beacon_init; sc->sc_beacon_enable = r88e_beacon_enable; sc->sc_beacon_set_rate = rtwn_nop_void_int; sc->sc_beacon_select = rtwn_nop_softc_int; sc->sc_temp_measure = r88e_temp_measure; sc->sc_temp_read = r88e_temp_read; sc->sc_init_tx_agg = r92cu_init_tx_agg; sc->sc_init_rx_agg = r88eu_init_rx_agg; sc->sc_init_ampdu = rtwn_nop_softc; sc->sc_init_intr = r88eu_init_intr; sc->sc_init_edca = r92c_init_edca; sc->sc_init_bb = r88e_init_bb; sc->sc_init_rf = r92c_init_rf; sc->sc_init_antsel = rtwn_nop_softc; sc->sc_post_init = r88eu_post_init; sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc; sc->mac_prog = &rtl8188eu_mac[0]; sc->mac_size = nitems(rtl8188eu_mac); sc->bb_prog = &rtl8188eu_bb[0]; sc->bb_size = nitems(rtl8188eu_bb); sc->agc_prog = &rtl8188eu_agc[0]; sc->agc_size = nitems(rtl8188eu_agc); sc->rf_prog = &rtl8188eu_rf[0]; sc->name = "RTL8188EU"; sc->fwname = "rtwn-rtl8188eufw"; sc->fwsig = 0x88e; sc->page_count = R88E_TX_PAGE_COUNT; sc->pktbuf_count = R88E_TXPKTBUF_COUNT; sc->ackto = 0x40; sc->npubqpages = R88E_PUBQ_NPAGES; sc->page_size = R92C_TX_PAGE_SIZE; sc->txdesc_len = sizeof(struct r92cu_tx_desc); sc->efuse_maxlen = R88E_EFUSE_MAX_LEN; sc->efuse_maplen = R88E_EFUSE_MAP_LEN; sc->rx_dma_size = R88E_RX_DMA_BUFFER_SIZE; sc->macid_limit = R88E_MACID_MAX + 1; sc->cam_entry_limit = R92C_CAM_ENTRY_COUNT; sc->fwsize_limit = R92C_MAX_FW_SIZE; sc->temp_delta = R88E_CALIB_THRESHOLD; sc->bcn_status_reg[0] = R92C_TDECTRL; sc->bcn_status_reg[1] = R92C_TDECTRL; sc->rcr = 0; sc->ntxchains = 1; sc->nrxchains = 1; r88eu_attach_private(sc); } Index: projects/clang400-import/sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c (revision 312720) @@ -1,264 +1,265 @@ /* $OpenBSD: if_rtwn.c,v 1.6 2015/08/28 00:03:53 deraadt Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2015 Stefan Sperling * Copyright (c) 2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct rtwn_r92c_txpwr r92c_txpwr; void r92ce_attach(struct rtwn_pci_softc *); static void r92ce_postattach(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; struct ieee80211com *ic = &sc->sc_ic; if (!(rs->chip & R92C_CHIP_92C) && rs->board_type == R92C_BOARD_TYPE_HIGHPA) rs->rs_txagc = &rtl8188ru_txagc[0]; else rs->rs_txagc = &rtl8192cu_txagc[0]; if ((rs->chip & (R92C_CHIP_UMC_A_CUT | R92C_CHIP_92C)) == R92C_CHIP_UMC_A_CUT) sc->fwname = "rtwn-rtl8192cfwE"; else sc->fwname = "rtwn-rtl8192cfwE_B"; sc->fwsig = 0x88c; rs->rs_scan_start = ic->ic_scan_start; ic->ic_scan_start = r92c_scan_start; rs->rs_scan_end = ic->ic_scan_end; ic->ic_scan_end = r92c_scan_end; } static void r92ce_set_name(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; if (rs->chip & R92C_CHIP_92C) sc->name = "RTL8192CE"; else sc->name = "RTL8188CE"; } static void r92ce_attach_private(struct rtwn_softc *sc) { struct r92c_softc *rs; rs = malloc(sizeof(struct r92c_softc), M_RTWN_PRIV, M_WAITOK | M_ZERO); rs->rs_txpwr = &r92c_txpwr; rs->rs_set_bw20 = r92c_set_bw20; rs->rs_get_txpower = r92c_get_txpower; rs->rs_set_gain = r92c_set_gain; rs->rs_tx_enable_ampdu = r92c_tx_enable_ampdu; rs->rs_tx_setup_hwseq = r92c_tx_setup_hwseq; rs->rs_tx_setup_macid = r92c_tx_setup_macid; rs->rs_set_name = r92ce_set_name; /* XXX TODO: test with net80211 ratectl! */ #ifndef RTWN_WITHOUT_UCODE rs->rs_c2h_timeout = hz; callout_init_mtx(&rs->rs_c2h_report, &sc->sc_mtx, 0); #endif rs->rf_read_delay[0] = 1000; rs->rf_read_delay[1] = 1000; rs->rf_read_delay[2] = 1000; sc->sc_priv = rs; } static void r92ce_adj_devcaps(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* * XXX do NOT enable PMGT until the RSVD_PAGE command * has been tested / fixed; the HRPWM register must be set too. */ ic->ic_caps &= ~IEEE80211_C_PMGT; } void r92ce_attach(struct rtwn_pci_softc *pc) { struct rtwn_softc *sc = &pc->pc_sc; /* PCIe part. */ pc->pc_setup_tx_desc = r92ce_setup_tx_desc; pc->pc_tx_postsetup = r92ce_tx_postsetup; pc->pc_copy_tx_desc = r92ce_copy_tx_desc; pc->pc_enable_intr = r92ce_enable_intr; pc->pc_qmap = 0xf771; pc->tcr = R92C_TCR_CFENDFORM | (1 << 12) | (1 << 13); /* Common part.
*/ /* RTL8192C* cannot use pairwise keys from the first 4 slots */ sc->sc_flags = RTWN_FLAG_CAM_FIXED; sc->sc_start_xfers = r92ce_start_xfers; sc->sc_set_chan = r92c_set_chan; sc->sc_fill_tx_desc = r92c_fill_tx_desc; sc->sc_fill_tx_desc_raw = r92c_fill_tx_desc_raw; sc->sc_fill_tx_desc_null = r92c_fill_tx_desc_null; /* XXX recheck */ sc->sc_dump_tx_desc = r92ce_dump_tx_desc; sc->sc_tx_radiotap_flags = r92c_tx_radiotap_flags; sc->sc_rx_radiotap_flags = r92c_rx_radiotap_flags; sc->sc_get_rx_stats = r92c_get_rx_stats; sc->sc_get_rssi_cck = r92c_get_rssi_cck; sc->sc_get_rssi_ofdm = r92c_get_rssi_ofdm; sc->sc_classify_intr = r92ce_classify_intr; sc->sc_handle_tx_report = rtwn_nop_softc_uint8_int; sc->sc_handle_c2h_report = rtwn_nop_softc_uint8_int; sc->sc_check_frame = rtwn_nop_int_softc_mbuf; sc->sc_rf_read = r92c_rf_read; sc->sc_rf_write = r92c_rf_write; sc->sc_check_condition = r92c_check_condition; sc->sc_efuse_postread = r92c_efuse_postread; sc->sc_parse_rom = r92c_parse_rom; sc->sc_set_led = r92ce_set_led; sc->sc_power_on = r92ce_power_on; sc->sc_power_off = r92ce_power_off; #ifndef RTWN_WITHOUT_UCODE sc->sc_fw_reset = r92ce_fw_reset; sc->sc_fw_download_enable = r92c_fw_download_enable; #endif + sc->sc_llt_init = r92c_llt_init; sc->sc_set_page_size = r92c_set_page_size; sc->sc_lc_calib = r92c_lc_calib; sc->sc_iq_calib = r92ce_iq_calib; sc->sc_read_chipid_vendor = r92c_read_chipid_vendor; sc->sc_adj_devcaps = r92ce_adj_devcaps; sc->sc_vap_preattach = rtwn_nop_softc_vap; sc->sc_postattach = r92ce_postattach; sc->sc_detach_private = r92c_detach_private; sc->sc_set_media_status = r92c_joinbss_rpt; #ifndef RTWN_WITHOUT_UCODE sc->sc_set_rsvd_page = r92c_set_rsvd_page; sc->sc_set_pwrmode = r92c_set_pwrmode; sc->sc_set_rssi = r92c_set_rssi; #endif sc->sc_beacon_init = r92c_beacon_init; sc->sc_beacon_enable = r92c_beacon_enable; sc->sc_beacon_set_rate = rtwn_nop_void_int; sc->sc_beacon_select = rtwn_nop_softc_int; sc->sc_temp_measure = r92c_temp_measure; sc->sc_temp_read = r92c_temp_read; sc->sc_init_tx_agg = rtwn_nop_softc; sc->sc_init_rx_agg = rtwn_nop_softc; sc->sc_init_ampdu = r92ce_init_ampdu; sc->sc_init_intr = r92ce_init_intr; sc->sc_init_edca = r92ce_init_edca; sc->sc_init_bb = r92ce_init_bb; sc->sc_init_rf = r92c_init_rf; sc->sc_init_antsel = rtwn_nop_softc; sc->sc_post_init = r92ce_post_init; sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc; sc->mac_prog = &rtl8192ce_mac[0]; sc->mac_size = nitems(rtl8192ce_mac); sc->bb_prog = &rtl8192ce_bb[0]; sc->bb_size = nitems(rtl8192ce_bb); sc->agc_prog = &rtl8192ce_agc[0]; sc->agc_size = nitems(rtl8192ce_agc); sc->rf_prog = &rtl8192c_rf[0]; sc->page_count = R92CE_TX_PAGE_COUNT; sc->pktbuf_count = R92C_TXPKTBUF_COUNT; sc->ackto = 0x40; sc->npubqpages = R92CE_PUBQ_NPAGES; sc->nhqpages = R92CE_HPQ_NPAGES; sc->nnqpages = 0; sc->nlqpages = R92CE_LPQ_NPAGES; sc->page_size = R92C_TX_PAGE_SIZE; sc->txdesc_len = sizeof(struct r92ce_tx_desc); sc->efuse_maxlen = R92C_EFUSE_MAX_LEN; sc->efuse_maplen = R92C_EFUSE_MAP_LEN; sc->rx_dma_size = R92C_RX_DMA_BUFFER_SIZE; sc->macid_limit = R92C_MACID_MAX + 1; sc->cam_entry_limit = R92C_CAM_ENTRY_COUNT; sc->fwsize_limit = R92C_MAX_FW_SIZE; sc->temp_delta = R92C_CALIB_THRESHOLD; sc->bcn_status_reg[0] = R92C_TDECTRL; /* * TODO: some additional setup is required * to maintain several beacons at the same time. * * XXX BCNQ1 mechanism is not needed here; move it to the USB module.
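 * Since both bcn_status_reg entries are set to R92C_TDECTRL here, * RTWN_CHIP_HAS_BCNQ1() evaluates to false on this chip.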
*/ sc->bcn_status_reg[1] = R92C_TDECTRL; sc->rcr = 0; r92ce_attach_private(sc); } Index: projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c.h (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c.h (revision 312720) @@ -1,116 +1,120 @@ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $OpenBSD: if_urtwnreg.h,v 1.3 2010/11/16 18:02:59 damien Exp $ * $FreeBSD$ */ #ifndef RTL8192C_H #define RTL8192C_H /* * Global definitions. */ #define R92C_TXPKTBUF_COUNT 256 #define R92C_TX_PAGE_SIZE 128 #define R92C_RX_DMA_BUFFER_SIZE 0x2800 #define R92C_MAX_FW_SIZE 0x4000 #define R92C_MACID_MAX 31 #define R92C_CAM_ENTRY_COUNT 32 #define R92C_CALIB_THRESHOLD 2 /* * Function declarations. */ /* r92c_attach.c */ void r92c_detach_private(struct rtwn_softc *); void r92c_read_chipid_vendor(struct rtwn_softc *, uint32_t); /* r92c_beacon.c */ void r92c_beacon_init(struct rtwn_softc *, void *, int); void r92c_beacon_enable(struct rtwn_softc *, int, int); /* r92c_calib.c */ void r92c_iq_calib(struct rtwn_softc *); void r92c_lc_calib(struct rtwn_softc *); void r92c_temp_measure(struct rtwn_softc *); uint8_t r92c_temp_read(struct rtwn_softc *); /* r92c_chan.c */ void r92c_get_txpower(struct rtwn_softc *, int, struct ieee80211_channel *, uint16_t[]); void r92c_set_bw20(struct rtwn_softc *, uint8_t); void r92c_set_chan(struct rtwn_softc *, struct ieee80211_channel *); void r92c_set_gain(struct rtwn_softc *, uint8_t); void r92c_scan_start(struct ieee80211com *); void r92c_scan_end(struct ieee80211com *); /* r92c_fw.c */ #ifndef RTWN_WITHOUT_UCODE void r92c_fw_reset(struct rtwn_softc *, int); void r92c_fw_download_enable(struct rtwn_softc *, int); #endif void r92c_joinbss_rpt(struct rtwn_softc *, int); #ifndef RTWN_WITHOUT_UCODE int r92c_set_rsvd_page(struct rtwn_softc *, int, int, int); int r92c_set_pwrmode(struct rtwn_softc *, struct ieee80211vap *, int); void r92c_set_rssi(struct rtwn_softc *); void r92c_handle_c2h_report(void *); #endif /* r92c_init.c */ int r92c_check_condition(struct rtwn_softc *, const uint8_t[]); +int r92c_llt_init(struct rtwn_softc *); int r92c_set_page_size(struct rtwn_softc *); void r92c_init_bb_common(struct rtwn_softc *); int r92c_init_rf_chain(struct rtwn_softc *, const struct rtwn_rf_prog *, int); void r92c_init_rf(struct rtwn_softc *); void r92c_init_edca(struct rtwn_softc *); void r92c_init_ampdu(struct rtwn_softc *); void r92c_init_antsel(struct rtwn_softc *); void r92c_pa_bias_init(struct rtwn_softc *); + +/* r92c_llt.c */ +int r92c_llt_write(struct rtwn_softc *, uint32_t, uint32_t); /* r92c_rf.c */ uint32_t r92c_rf_read(struct rtwn_softc *, int, uint8_t); void r92c_rf_write(struct rtwn_softc *, 
int, uint8_t, uint32_t); /* r92c_rom.c */ void r92c_efuse_postread(struct rtwn_softc *); void r92c_parse_rom(struct rtwn_softc *, uint8_t *); /* r92c_rx.c */ int8_t r92c_get_rssi_cck(struct rtwn_softc *, void *); int8_t r92c_get_rssi_ofdm(struct rtwn_softc *, void *); uint8_t r92c_rx_radiotap_flags(const void *); void r92c_get_rx_stats(struct rtwn_softc *, struct ieee80211_rx_stats *, const void *, const void *); /* r92c_tx.c */ void r92c_tx_enable_ampdu(void *, int); void r92c_tx_setup_hwseq(void *); void r92c_tx_setup_macid(void *, int); void r92c_fill_tx_desc(struct rtwn_softc *, struct ieee80211_node *, struct mbuf *, void *, uint8_t, int); void r92c_fill_tx_desc_raw(struct rtwn_softc *, struct ieee80211_node *, struct mbuf *, void *, const struct ieee80211_bpf_params *); void r92c_fill_tx_desc_null(struct rtwn_softc *, void *, int, int, int); uint8_t r92c_tx_radiotap_flags(const void *); #endif /* RTL8192C_H */ Index: projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_init.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_init.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_init.c (revision 312720) @@ -1,317 +1,343 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2014 Kevin Lo * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int r92c_check_condition(struct rtwn_softc *sc, const uint8_t cond[]) { struct r92c_softc *rs = sc->sc_priv; uint8_t mask; int i; if (cond[0] == 0) return (1); RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "%s: condition byte 0: %02X; chip %02X, board %02X\n", __func__, cond[0], rs->chip, rs->board_type); if (!(rs->chip & R92C_CHIP_92C)) { if (rs->board_type == R92C_BOARD_TYPE_HIGHPA) mask = R92C_COND_RTL8188RU; else if (rs->board_type == R92C_BOARD_TYPE_MINICARD) mask = R92C_COND_RTL8188CE; else mask = R92C_COND_RTL8188CU; } else { if (rs->board_type == R92C_BOARD_TYPE_MINICARD) mask = R92C_COND_RTL8192CE; else mask = R92C_COND_RTL8192CU; } for (i = 0; i < RTWN_MAX_CONDITIONS && cond[i] != 0; i++) if ((cond[i] & mask) == mask) return (1); return (0); } int +r92c_llt_init(struct rtwn_softc *sc) +{ + int i, error; + + /* Reserve pages [0; page_count]. */ + for (i = 0; i < sc->page_count; i++) { + if ((error = r92c_llt_write(sc, i, i + 1)) != 0) + return (error); + } + /* NB: 0xff indicates end-of-list. 
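 Terminating the reserved chain here leaves pages [page_count + 1; pktbuf_count - 1] for the circular Tx buffer list that is built next.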
*/ + if ((error = r92c_llt_write(sc, i, 0xff)) != 0) + return (error); + /* + * Use pages [page_count + 1; pktbuf_count - 1] + * as ring buffer. + */ + for (++i; i < sc->pktbuf_count - 1; i++) { + if ((error = r92c_llt_write(sc, i, i + 1)) != 0) + return (error); + } + /* Make the last page point to the beginning of the ring buffer. */ + error = r92c_llt_write(sc, i, sc->page_count + 1); + return (error); +} + +int r92c_set_page_size(struct rtwn_softc *sc) { return (rtwn_write_1(sc, R92C_PBP, SM(R92C_PBP_PSRX, R92C_PBP_128) | SM(R92C_PBP_PSTX, R92C_PBP_128)) == 0); } void r92c_init_bb_common(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; int i, j; /* Write BB initialization values. */ for (i = 0; i < sc->bb_size; i++) { const struct rtwn_bb_prog *bb_prog = &sc->bb_prog[i]; while (!rtwn_check_condition(sc, bb_prog->cond)) { KASSERT(bb_prog->next != NULL, ("%s: wrong condition value (i %d)\n", __func__, i)); bb_prog = bb_prog->next; } for (j = 0; j < bb_prog->count; j++) { RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "BB: reg 0x%03x, val 0x%08x\n", bb_prog->reg[j], bb_prog->val[j]); rtwn_bb_write(sc, bb_prog->reg[j], bb_prog->val[j]); rtwn_delay(sc, 1); } } if (rs->chip & R92C_CHIP_92C_1T2R) { /* 8192C 1T only configuration. */ rtwn_bb_setbits(sc, R92C_FPGA0_TXINFO, 0x03, 0x02); rtwn_bb_setbits(sc, R92C_FPGA1_TXINFO, 0x300033, 0x200022); rtwn_bb_setbits(sc, R92C_CCK0_AFESETTING, 0xff000000, 0x45000000); rtwn_bb_setbits(sc, R92C_OFDM0_TRXPATHENA, 0xff, 0x23); rtwn_bb_setbits(sc, R92C_OFDM0_AGCPARAM1, 0x30, 0x10); rtwn_bb_setbits(sc, 0xe74, 0x0c000000, 0x08000000); rtwn_bb_setbits(sc, 0xe78, 0x0c000000, 0x08000000); rtwn_bb_setbits(sc, 0xe7c, 0x0c000000, 0x08000000); rtwn_bb_setbits(sc, 0xe80, 0x0c000000, 0x08000000); rtwn_bb_setbits(sc, 0xe88, 0x0c000000, 0x08000000); } /* Write AGC values. */ for (i = 0; i < sc->agc_size; i++) { const struct rtwn_agc_prog *agc_prog = &sc->agc_prog[i]; while (!rtwn_check_condition(sc, agc_prog->cond)) { KASSERT(agc_prog->next != NULL, ("%s: wrong condition value (2) (i %d)\n", __func__, i)); agc_prog = agc_prog->next; } for (j = 0; j < agc_prog->count; j++) { RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "AGC: val 0x%08x\n", agc_prog->val[j]); rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, agc_prog->val[j]); rtwn_delay(sc, 1); } } if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR) sc->sc_flags |= RTWN_FLAG_CCK_HIPWR; } int r92c_init_rf_chain(struct rtwn_softc *sc, const struct rtwn_rf_prog *rf_prog, int chain) { int i, j; RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "%s: chain %d\n", __func__, chain); for (i = 0; rf_prog[i].reg != NULL; i++) { const struct rtwn_rf_prog *prog = &rf_prog[i]; while (!rtwn_check_condition(sc, prog->cond)) { KASSERT(prog->next != NULL, ("%s: wrong condition value (i %d)\n", __func__, i)); prog = prog->next; } for (j = 0; j < prog->count; j++) { RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "RF: reg 0x%02x, val 0x%05x\n", prog->reg[j], prog->val[j]); /* * These are fake RF register offsets that * indicate a delay is required. */ /* NB: we are using 'value' to store the required delay. */ if (prog->reg[j] > 0xf8) { rtwn_delay(sc, prog->val[j]); continue; } rtwn_rf_write(sc, chain, prog->reg[j], prog->val[j]); rtwn_delay(sc, 1); } } return (i); } void r92c_init_rf(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; uint32_t reg, type; int i, chain, idx, off; for (chain = 0, i = 0; chain < sc->nrxchains; chain++, i++) { /* Save RF_ENV control type.
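 Two chains share one RFIFACESW register (16 bits per chain, hence the idx / off arithmetic below); the saved type is restored once the chain's RF program has been written.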
*/ idx = chain / 2; off = (chain % 2) * 16; reg = rtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); type = (reg >> off) & 0x10; /* Set RF_ENV enable. */ rtwn_bb_setbits(sc, R92C_FPGA0_RFIFACEOE(chain), 0, 0x100000); rtwn_delay(sc, 1); /* Set RF_ENV output high. */ rtwn_bb_setbits(sc, R92C_FPGA0_RFIFACEOE(chain), 0, 0x10); rtwn_delay(sc, 1); /* Set address and data lengths of RF registers. */ rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(chain), R92C_HSSI_PARAM2_ADDR_LENGTH, 0); rtwn_delay(sc, 1); rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(chain), R92C_HSSI_PARAM2_DATA_LENGTH, 0); rtwn_delay(sc, 1); /* Write RF initialization values for this chain. */ i += r92c_init_rf_chain(sc, &sc->rf_prog[i], chain); /* Restore RF_ENV control type. */ rtwn_bb_setbits(sc, R92C_FPGA0_RFIFACESW(idx), 0x10 << off, type << off); /* Cache RF register CHNLBW. */ rs->rf_chnlbw[chain] = rtwn_rf_read(sc, chain, R92C_RF_CHNLBW); } if ((rs->chip & (R92C_CHIP_UMC_A_CUT | R92C_CHIP_92C)) == R92C_CHIP_UMC_A_CUT) { rtwn_rf_write(sc, 0, R92C_RF_RX_G1, 0x30255); rtwn_rf_write(sc, 0, R92C_RF_RX_G2, 0x50a00); } /* Turn CCK and OFDM blocks on. */ rtwn_bb_setbits(sc, R92C_FPGA0_RFMOD, 0, R92C_RFMOD_CCK_EN); rtwn_bb_setbits(sc, R92C_FPGA0_RFMOD, 0, R92C_RFMOD_OFDM_EN); } void r92c_init_edca(struct rtwn_softc *sc) { /* SIFS */ rtwn_write_2(sc, R92C_SPEC_SIFS, 0x100a); rtwn_write_2(sc, R92C_MAC_SPEC_SIFS, 0x100a); rtwn_write_2(sc, R92C_SIFS_CCK, 0x100a); rtwn_write_2(sc, R92C_SIFS_OFDM, 0x100a); /* TXOP */ rtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x005ea42b); rtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a44f); rtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005ea324); rtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002fa226); } void r92c_init_ampdu(struct rtwn_softc *sc) { /* Setup AMPDU aggregation. */ rtwn_write_4(sc, R92C_AGGLEN_LMT, 0x99997631); /* MCS7~0 */ rtwn_write_1(sc, R92C_AGGR_BREAK_TIME, 0x16); rtwn_write_2(sc, R92C_MAX_AGGR_NUM, 0x0708); } void r92c_init_antsel(struct rtwn_softc *sc) { uint32_t reg; if (sc->ntxchains != 1 || sc->nrxchains != 1) return; rtwn_setbits_1(sc, R92C_LEDCFG2, 0, 0x80); rtwn_bb_setbits(sc, R92C_FPGA0_RFPARAM(0), 0, 0x2000); reg = rtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(0)); sc->sc_ant = MS(reg, R92C_FPGA0_RFIFACEOE0_ANT); /* XXX */ } void r92c_pa_bias_init(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; int i; for (i = 0; i < sc->nrxchains; i++) { if (rs->pa_setting & (1 << i)) continue; r92c_rf_write(sc, i, R92C_RF_IPA, 0x0f406); r92c_rf_write(sc, i, R92C_RF_IPA, 0x4f406); r92c_rf_write(sc, i, R92C_RF_IPA, 0x8f406); r92c_rf_write(sc, i, R92C_RF_IPA, 0xcf406); } if (!(rs->pa_setting & 0x10)) rtwn_setbits_1(sc, 0x16, 0xf0, 0x90); } Index: projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_llt.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_llt.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_llt.c (revision 312720) @@ -0,0 +1,73 @@ +/* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ + +/*- + * Copyright (c) 2010 Damien Bergamini + * Copyright (c) 2014 Kevin Lo + * Copyright (c) 2015-2016 Andriy Voskoboinyk + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include + +int +r92c_llt_write(struct rtwn_softc *sc, uint32_t addr, uint32_t data) +{ + int ntries, error; + + error = rtwn_write_4(sc, R92C_LLT_INIT, + SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) | + SM(R92C_LLT_INIT_ADDR, addr) | + SM(R92C_LLT_INIT_DATA, data)); + if (error != 0) + return (error); + /* Wait for write operation to complete. */ + for (ntries = 0; ntries < 20; ntries++) { + if (MS(rtwn_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) == + R92C_LLT_INIT_OP_NO_ACTIVE) + return (0); + rtwn_delay(sc, 10); + } + return (ETIMEDOUT); +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_llt.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_reg.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_reg.h (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8192c/r92c_reg.h (revision 312720) @@ -1,857 +1,879 @@ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2015 Stefan Sperling * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $OpenBSD: if_urtwnreg.h,v 1.3 2010/11/16 18:02:59 damien Exp $ * $FreeBSD$ */ #ifndef R92C_REG_H #define R92C_REG_H /* * MAC registers. */ /* System Configuration. 
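 *
 * A note for readers of this register map: every FIELD_M / FIELD_S
 * pair below is a mask/shift tuple consumed by the driver's bit-field
 * helpers.  A minimal sketch of those helpers (rtwn keeps the real
 * definitions in its shared headers; they are reproduced here only
 * for reference and are not part of this change):
 *
 *	#define MS(val, field)	(((val) & field##_M) >> field##_S)
 *	#define SM(field, val)	(((val) << field##_S) & field##_M)
 *	#define RW(var, field, val)	(((var) & ~field##_M) | SM(field, val))
 *
 * MS() extracts a field from a register value, SM() shifts a value
 * into field position, and RW() replaces one field inside a
 * previously read register word.
 *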
*/ #define R92C_SYS_ISO_CTRL 0x000 #define R92C_SYS_FUNC_EN 0x002 #define R92C_APS_FSMCO 0x004 #define R92C_SYS_CLKR 0x008 #define R92C_AFE_MISC 0x010 #define R92C_SPS0_CTRL 0x011 #define R92C_SPS_OCP_CFG 0x018 #define R92C_RSV_CTRL 0x01c #define R92C_RF_CTRL 0x01f #define R92C_LDOA15_CTRL 0x020 #define R92C_LDOV12D_CTRL 0x021 #define R92C_LDOHCI12_CTRL 0x022 #define R92C_LPLDO_CTRL 0x023 #define R92C_AFE_XTAL_CTRL 0x024 #define R92C_AFE_PLL_CTRL 0x028 #define R92C_APE_PLL_CTRL_EXT 0x02c #define R92C_MAC_PHY_CTRL R92C_APE_PLL_CTRL_EXT #define R92C_EFUSE_CTRL 0x030 #define R92C_EFUSE_TEST 0x034 #define R92C_PWR_DATA 0x038 #define R92C_CAL_TIMER 0x03c #define R92C_ACLK_MON 0x03e #define R92C_GPIO_MUXCFG 0x040 #define R92C_GPIO_IO_SEL 0x042 #define R92C_MAC_PINMUX_CFG 0x043 #define R92C_GPIO_PIN_CTRL 0x044 #define R92C_GPIO_IN 0x044 #define R92C_GPIO_OUT 0x045 #define R92C_GPIO_IOSEL 0x046 #define R92C_GPIO_MOD 0x047 #define R92C_GPIO_INTM 0x048 #define R92C_LEDCFG0 0x04c #define R92C_LEDCFG1 0x04d #define R92C_LEDCFG2 0x04e #define R92C_LEDCFG3 0x04f #define R92C_FSIMR 0x050 #define R92C_FSISR 0x054 #define R92C_HSIMR 0x058 #define R92C_HSISR 0x05c #define R92C_MULTI_FUNC_CTRL 0x068 +#define R92C_LDO_SWR_CTRL 0x07c #define R92C_MCUFWDL 0x080 #define R92C_HMEBOX_EXT(idx) (0x088 + (idx) * 2) #define R92C_EFUSE_ACCESS 0x0cf #define R92C_BIST_SCAN 0x0d0 #define R92C_BIST_RPT 0x0d4 #define R92C_BIST_ROM_RPT 0x0d8 #define R92C_HPON_FSM 0x0ec #define R92C_SYS_CFG 0x0f0 #define R92C_TYPE_ID 0x0fc /* MAC General Configuration. */ #define R92C_CR 0x100 #define R92C_MSR 0x102 #define R92C_PBP 0x104 #define R92C_TRXDMA_CTRL 0x10c #define R92C_TRXFF_BNDY 0x114 #define R92C_TRXFF_STATUS 0x118 #define R92C_RXFF_PTR 0x11c #define R92C_HIMR 0x120 #define R92C_HISR 0x124 #define R92C_HIMRE 0x128 #define R92C_HISRE 0x12c #define R92C_CPWM 0x12f #define R92C_FWIMR 0x130 #define R92C_FWISR 0x134 #define R92C_PKTBUF_DBG_CTRL 0x140 #define R92C_PKTBUF_DBG_DATA_L 0x144 #define R92C_PKTBUF_DBG_DATA_H 0x148 #define R92C_TC0_CTRL(i) (0x150 + (i) * 4) #define R92C_TCUNIT_BASE 0x164 #define R92C_MBIST_START 0x174 #define R92C_MBIST_DONE 0x178 #define R92C_MBIST_FAIL 0x17c #define R92C_C2H_EVT_MSG 0x1a0 #define R92C_C2H_EVT_CLEAR 0x1af #define R92C_C2H_EVT_MSG_TEST 0x1b8 #define R92C_MCUTST_1 0x1c0 #define R92C_FMETHR 0x1c8 #define R92C_HMETFR 0x1cc #define R92C_HMEBOX(idx) (0x1d0 + (idx) * 4) #define R92C_LLT_INIT 0x1e0 #define R92C_BB_ACCESS_CTRL 0x1e8 #define R92C_BB_ACCESS_DATA 0x1ec /* Tx DMA Configuration. */ #define R92C_RQPN 0x200 #define R92C_FIFOPAGE 0x204 #define R92C_TDECTRL 0x208 #define R92C_TXDMA_OFFSET_CHK 0x20c #define R92C_TXDMA_STATUS 0x210 #define R92C_RQPN_NPQ 0x214 +#define R92C_AUTO_LLT 0x224 /* Rx DMA Configuration. */ #define R92C_RXDMA_AGG_PG_TH 0x280 #define R92C_RXPKT_NUM 0x284 #define R92C_RXDMA_STATUS 0x288 /* Protocol Configuration. 
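 *
 * R92C_LLT_INIT above is the doorway used by r92c_llt_write() (added
 * by this change) to fill the link list table; r92c_llt_init() uses
 * it to build the Tx packet buffer page lists.  The resulting layout
 * is roughly the following (page_count and pktbuf_count are per-chip
 * constants):
 *
 *	entry 0 .. page_count-1		reserved pages, each entry
 *					pointing to the next one
 *	entry page_count		0xff, terminates the reserved list
 *	entry page_count+1 ..
 *	      pktbuf_count-2		Tx ring buffer, chained
 *	entry pktbuf_count-1		points back to page_count+1,
 *					closing the ring
 *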
*/ #define R92C_VOQ_INFORMATION 0x400 #define R92C_VIQ_INFORMATION 0x404 #define R92C_BEQ_INFORMATION 0x408 #define R92C_BKQ_INFORMATION 0x40c #define R92C_MGQ_INFORMATION 0x410 #define R92C_HGQ_INFORMATION 0x414 #define R92C_BCNQ_INFORMATION 0x418 #define R92C_CPU_MGQ_INFORMATION 0x41c #define R92C_FWHW_TXQ_CTRL 0x420 #define R92C_HWSEQ_CTRL 0x423 #define R92C_TXPKTBUF_BCNQ_BDNY 0x424 #define R92C_TXPKTBUF_MGQ_BDNY 0x425 #define R92C_SPEC_SIFS 0x428 #define R92C_RL 0x42a #define R92C_DARFRC 0x430 #define R92C_RARFRC 0x438 #define R92C_RRSR 0x440 #define R92C_ARFR(i) (0x444 + (i) * 4) #define R92C_AGGLEN_LMT 0x458 #define R92C_AMPDU_MIN_SPACE 0x45c #define R92C_TXPKTBUF_WMAC_LBK_BF_HD 0x45d #define R92C_FAST_EDCA_CTRL 0x460 #define R92C_RD_RESP_PKT_TH 0x463 #define R92C_INIRTS_RATE_SEL 0x480 #define R92C_INIDATA_RATE_SEL(macid) (0x484 + (macid)) #define R92C_QUEUE_CTRL 0x4c6 #define R92C_MAX_AGGR_NUM 0x4ca #define R92C_BAR_MODE_CTRL 0x4cc /* EDCA Configuration. */ #define R92C_EDCA_VO_PARAM 0x500 #define R92C_EDCA_VI_PARAM 0x504 #define R92C_EDCA_BE_PARAM 0x508 #define R92C_EDCA_BK_PARAM 0x50c #define R92C_BCNTCFG 0x510 #define R92C_PIFS 0x512 #define R92C_RDG_PIFS 0x513 #define R92C_SIFS_CCK 0x514 #define R92C_SIFS_OFDM 0x516 #define R92C_AGGR_BREAK_TIME 0x51a #define R92C_SLOT 0x51b #define R92C_TX_PTCL_CTRL 0x520 #define R92C_TXPAUSE 0x522 #define R92C_DIS_TXREQ_CLR 0x523 #define R92C_RD_CTRL 0x524 #define R92C_TBTT_PROHIBIT 0x540 #define R92C_RD_NAV_NXT 0x544 #define R92C_NAV_PROT_LEN 0x546 #define R92C_BCN_CTRL(id) ((id) + 0x550) /* WARNING: R92C_USTIME_TSF == 0x55c, not 0x551 */ #define R92C_MBID_NUM 0x552 #define R92C_DUAL_TSF_RST 0x553 #define R92C_BCN_INTERVAL(id) (0x554 + (id) * 2) #define R92C_DRVERLYINT 0x558 #define R92C_BCNDMATIM 0x559 #define R92C_ATIMWND 0x55a #define R92C_USTIME_TSF 0x55c #define R92C_BCN_MAX_ERR 0x55d #define R92C_RXTSF_OFFSET_CCK 0x55e #define R92C_RXTSF_OFFSET_OFDM 0x55f #define R92C_TSFTR(i) (0x560 + (i) * 8) #define R92C_PSTIMER 0x580 #define R92C_TIMER0 0x584 #define R92C_TIMER1 0x588 #define R92C_ACMHWCTRL 0x5c0 #define R92C_ACMRSTCTRL 0x5c1 #define R92C_ACMAVG 0x5c2 #define R92C_VO_ADMTIME 0x5c4 #define R92C_VI_ADMTIME 0x5c6 #define R92C_BE_ADMTIME 0x5c8 #define R92C_EDCA_RANDOM_GEN 0x5cc #define R92C_SCH_TXCMD 0x5d0 /* WMAC Configuration. */ #define R92C_APSD_CTRL 0x600 #define R92C_BWOPMODE 0x603 #define R92C_TCR 0x604 #define R92C_RCR 0x608 #define R92C_RX_PKT_LIMIT 0x60c #define R92C_RX_DRVINFO_SZ 0x60f #define R92C_MACID0 0x610 #define R92C_BSSID0 0x618 #define R92C_MAR 0x620 #define R92C_USTIME_EDCA 0x638 #define R92C_MAC_SPEC_SIFS 0x63a #define R92C_R2T_SIFS 0x63c #define R92C_T2T_SIFS 0x63e #define R92C_ACKTO 0x640 #define R92C_NAV_UPPER 0x652 #define R92C_WMAC_TRXPTCL_CTL 0x668 #define R92C_CAMCMD 0x670 #define R92C_CAMWRITE 0x674 #define R92C_CAMREAD 0x678 #define R92C_CAMDBG 0x67c #define R92C_SECCFG 0x680 #define R92C_RXFLTMAP0 0x6a0 #define R92C_RXFLTMAP1 0x6a2 #define R92C_RXFLTMAP2 0x6a4 #define R92C_BCN_PSR_RPT 0x6a8 #define R92C_MACID1 0x700 #define R92C_BSSID1 0x708 #define R92C_MACID(id) ((id) == 0 ? R92C_MACID0 : R92C_MACID1) #define R92C_BSSID(id) ((id) == 0 ? R92C_BSSID0 : R92C_BSSID1) /* Bits for R92C_SYS_ISO_CTRL. 
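 *
 * As a worked example of the EDCA registers above: the best-effort
 * value 0x005ea42b that r92c_init_edca() (earlier in this diff)
 * writes to R92C_EDCA_BE_PARAM decodes, via the R92C_EDCA_PARAM_*
 * fields defined further below, as
 *
 *	AIFS   = 0x2b	(43 us)
 *	ECWMIN = 0x4	(CWmin = 2^4 - 1 = 15 slots)
 *	ECWMAX = 0xa	(CWmax = 2^10 - 1 = 1023 slots)
 *	TXOP   = 0x5e	(94 units of 32 us, i.e. 3008 us)
 *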
*/ #define R92C_SYS_ISO_CTRL_MD2PP 0x0001 #define R92C_SYS_ISO_CTRL_UA2USB 0x0002 #define R92C_SYS_ISO_CTRL_UD2CORE 0x0004 #define R92C_SYS_ISO_CTRL_PA2PCIE 0x0008 #define R92C_SYS_ISO_CTRL_PD2CORE 0x0010 #define R92C_SYS_ISO_CTRL_IP2MAC 0x0020 #define R92C_SYS_ISO_CTRL_DIOP 0x0040 #define R92C_SYS_ISO_CTRL_DIOE 0x0080 #define R92C_SYS_ISO_CTRL_EB2CORE 0x0100 #define R92C_SYS_ISO_CTRL_DIOR 0x0200 #define R92C_SYS_ISO_CTRL_PWC_EV25V 0x4000 #define R92C_SYS_ISO_CTRL_PWC_EV12V 0x8000 /* Bits for R92C_SYS_FUNC_EN. */ #define R92C_SYS_FUNC_EN_BBRSTB 0x0001 #define R92C_SYS_FUNC_EN_BB_GLB_RST 0x0002 #define R92C_SYS_FUNC_EN_USBA 0x0004 #define R92C_SYS_FUNC_EN_UPLL 0x0008 #define R92C_SYS_FUNC_EN_USBD 0x0010 #define R92C_SYS_FUNC_EN_DIO_PCIE 0x0020 #define R92C_SYS_FUNC_EN_PCIEA 0x0040 #define R92C_SYS_FUNC_EN_PPLL 0x0080 #define R92C_SYS_FUNC_EN_PCIED 0x0100 #define R92C_SYS_FUNC_EN_DIOE 0x0200 #define R92C_SYS_FUNC_EN_CPUEN 0x0400 #define R92C_SYS_FUNC_EN_DCORE 0x0800 #define R92C_SYS_FUNC_EN_ELDR 0x1000 #define R92C_SYS_FUNC_EN_DIO_RF 0x2000 #define R92C_SYS_FUNC_EN_HWPDN 0x4000 #define R92C_SYS_FUNC_EN_MREGEN 0x8000 /* Bits for R92C_APS_FSMCO. */ #define R92C_APS_FSMCO_PFM_LDALL 0x00000001 #define R92C_APS_FSMCO_PFM_ALDN 0x00000002 #define R92C_APS_FSMCO_PFM_LDKP 0x00000004 #define R92C_APS_FSMCO_PFM_WOWL 0x00000008 #define R92C_APS_FSMCO_PDN_EN 0x00000010 #define R92C_APS_FSMCO_PDN_PL 0x00000020 #define R92C_APS_FSMCO_APFM_ONMAC 0x00000100 #define R92C_APS_FSMCO_APFM_OFF 0x00000200 #define R92C_APS_FSMCO_APFM_RSM 0x00000400 #define R92C_APS_FSMCO_AFSM_HSUS 0x00000800 #define R92C_APS_FSMCO_AFSM_PCIE 0x00001000 #define R92C_APS_FSMCO_APDM_MAC 0x00002000 #define R92C_APS_FSMCO_APDM_HOST 0x00004000 #define R92C_APS_FSMCO_APDM_HPDN 0x00008000 #define R92C_APS_FSMCO_RDY_MACON 0x00010000 #define R92C_APS_FSMCO_SUS_HOST 0x00020000 #define R92C_APS_FSMCO_ROP_ALD 0x00100000 #define R92C_APS_FSMCO_ROP_PWR 0x00200000 #define R92C_APS_FSMCO_ROP_SPS 0x00400000 #define R92C_APS_FSMCO_SOP_MRST 0x02000000 #define R92C_APS_FSMCO_SOP_FUSE 0x04000000 #define R92C_APS_FSMCO_SOP_ABG 0x08000000 #define R92C_APS_FSMCO_SOP_AMB 0x10000000 #define R92C_APS_FSMCO_SOP_RCK 0x20000000 #define R92C_APS_FSMCO_SOP_A8M 0x40000000 #define R92C_APS_FSMCO_XOP_BTCK 0x80000000 /* Bits for R92C_SYS_CLKR. */ #define R92C_SYS_CLKR_ANAD16V_EN 0x00000001 #define R92C_SYS_CLKR_ANA8M 0x00000002 #define R92C_SYS_CLKR_MACSLP 0x00000010 #define R92C_SYS_CLKR_LOADER_EN 0x00000020 #define R92C_SYS_CLKR_80M_SSC_DIS 0x00000080 #define R92C_SYS_CLKR_80M_SSC_EN_HO 0x00000100 #define R92C_SYS_CLKR_PHY_SSC_RSTB 0x00000200 #define R92C_SYS_CLKR_SEC_EN 0x00000400 #define R92C_SYS_CLKR_MAC_EN 0x00000800 #define R92C_SYS_CLKR_SYS_EN 0x00001000 #define R92C_SYS_CLKR_RING_EN 0x00002000 +/* Bits for R92C_RSV_CTRL. */ +#define R92C_RSV_CTRL_WLOCK_ALL 0x01 +#define R92C_RSV_CTRL_WLOCK_00 0x02 +#define R92C_RSV_CTRL_WLOCK_04 0x04 +#define R92C_RSV_CTRL_WLOCK_08 0x08 +#define R92C_RSV_CTRL_WLOCK_40 0x10 +#define R92C_RSV_CTRL_R_DIS_PRST_0 0x20 +#define R92C_RSV_CTRL_R_DIS_PRST_1 0x40 +#define R92C_RSV_CTRL_LOCK_ALL_EN 0x80 + /* Bits for R92C_RF_CTRL. */ #define R92C_RF_CTRL_EN 0x01 #define R92C_RF_CTRL_RSTB 0x02 #define R92C_RF_CTRL_SDMRSTB 0x04 /* Bits for R92C_LDOA15_CTRL. */ #define R92C_LDOA15_CTRL_EN 0x01 #define R92C_LDOA15_CTRL_STBY 0x02 #define R92C_LDOA15_CTRL_OBUF 0x04 #define R92C_LDOA15_CTRL_REG_VOS 0x08 /* Bits for R92C_LDOV12D_CTRL. */ #define R92C_LDOV12D_CTRL_LDV12_EN 0x01 /* Bits for R92C_LPLDO_CTRL. 
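 *
 * Typical use of the R92C_APS_FSMCO bits above is the MAC power-on
 * handshake (compare r92e_power_on() later in this diff): the driver
 * raises APFM_ONMAC and then polls until the hardware acknowledges
 * by clearing it.
 *
 *	rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0,
 *	    R92C_APS_FSMCO_APFM_ONMAC, 1);
 *	for (ntries = 0; ntries < 5000; ntries++) {
 *		if (!(rtwn_read_2(sc, R92C_APS_FSMCO) &
 *		    R92C_APS_FSMCO_APFM_ONMAC))
 *			break;
 *		rtwn_delay(sc, 10);
 *	}
 *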
*/ #define R92C_LPLDO_CTRL_SLEEP 0x10 /* Bits for R92C_AFE_XTAL_CTRL. */ #define R92C_AFE_XTAL_CTRL_ADDR_M 0x007ff800 #define R92C_AFE_XTAL_CTRL_ADDR_S 11 /* Bits for R92C_AFE_PLL_CTRL. */ #define R92C_AFE_PLL_CTRL_EN 0x0001 #define R92C_AFE_PLL_CTRL_320_EN 0x0002 #define R92C_AFE_PLL_CTRL_FREF_SEL 0x0004 #define R92C_AFE_PLL_CTRL_EDGE_SEL 0x0008 #define R92C_AFE_PLL_CTRL_WDOGB 0x0010 #define R92C_AFE_PLL_CTRL_LPFEN 0x0020 /* Bits for R92C_EFUSE_CTRL. */ #define R92C_EFUSE_CTRL_DATA_M 0x000000ff #define R92C_EFUSE_CTRL_DATA_S 0 #define R92C_EFUSE_CTRL_ADDR_M 0x0003ff00 #define R92C_EFUSE_CTRL_ADDR_S 8 #define R92C_EFUSE_CTRL_VALID 0x80000000 /* Bits for R92C_GPIO_MUXCFG. */ #define R92C_GPIO_MUXCFG_ENBT 0x0020 /* Bits for R92C_LEDCFG0. */ #define R92C_LEDCFG0_DIS 0x08 +/* Bits for R92C_LEDCFG1. */ +#define R92C_LEDCFG1_DIS 0x80 + /* Bits for R92C_MULTI_FUNC_CTRL. */ #define R92C_MULTI_BT_FUNC_EN 0x00040000 /* Bits for R92C_MCUFWDL. */ #define R92C_MCUFWDL_EN 0x00000001 #define R92C_MCUFWDL_RDY 0x00000002 #define R92C_MCUFWDL_CHKSUM_RPT 0x00000004 #define R92C_MCUFWDL_MACINI_RDY 0x00000008 #define R92C_MCUFWDL_BBINI_RDY 0x00000010 #define R92C_MCUFWDL_RFINI_RDY 0x00000020 #define R92C_MCUFWDL_WINTINI_RDY 0x00000040 #define R92C_MCUFWDL_RAM_DL_SEL 0x00000080 /* 1: RAM, 0: ROM */ #define R92C_MCUFWDL_PAGE_M 0x00070000 #define R92C_MCUFWDL_PAGE_S 16 #define R92C_MCUFWDL_ROM_DLEN 0x00080000 #define R92C_MCUFWDL_CPRST 0x00800000 /* Bits for R92C_EFUSE_ACCESS. */ #define R92C_EFUSE_ACCESS_OFF 0x00 #define R92C_EFUSE_ACCESS_ON 0x69 /* Bits for R92C_HPON_FSM. */ #define R92C_HPON_FSM_CHIP_BONDING_ID_S 22 #define R92C_HPON_FSM_CHIP_BONDING_ID_M 0x00c00000 #define R92C_HPON_FSM_CHIP_BONDING_ID_92C_1T2R 1 /* Bits for R92C_SYS_CFG. */ #define R92C_SYS_CFG_XCLK_VLD 0x00000001 #define R92C_SYS_CFG_ACLK_VLD 0x00000002 #define R92C_SYS_CFG_UCLK_VLD 0x00000004 #define R92C_SYS_CFG_PCLK_VLD 0x00000008 #define R92C_SYS_CFG_PCIRSTB 0x00000010 #define R92C_SYS_CFG_V15_VLD 0x00000020 #define R92C_SYS_CFG_TRP_B15V_EN 0x00000080 #define R92C_SYS_CFG_SIC_IDLE 0x00000100 #define R92C_SYS_CFG_BD_MAC2 0x00000200 #define R92C_SYS_CFG_BD_MAC1 0x00000400 #define R92C_SYS_CFG_IC_MACPHY_MODE 0x00000800 #define R92C_SYS_CFG_CHIP_VER_RTL_M 0x0000f000 #define R92C_SYS_CFG_CHIP_VER_RTL_S 12 #define R92C_SYS_CFG_BT_FUNC 0x00010000 #define R92C_SYS_CFG_VENDOR_UMC 0x00080000 #define R92C_SYS_CFG_PAD_HWPD_IDN 0x00400000 #define R92C_SYS_CFG_TRP_VAUX_EN 0x00800000 #define R92C_SYS_CFG_TRP_BT_EN 0x01000000 #define R92C_SYS_CFG_BD_PKG_SEL 0x02000000 #define R92C_SYS_CFG_BD_HCI_SEL 0x04000000 #define R92C_SYS_CFG_TYPE_92C 0x08000000 /* Bits for R92C_CR. */ #define R92C_CR_HCI_TXDMA_EN 0x0001 #define R92C_CR_HCI_RXDMA_EN 0x0002 #define R92C_CR_TXDMA_EN 0x0004 #define R92C_CR_RXDMA_EN 0x0008 #define R92C_CR_PROTOCOL_EN 0x0010 #define R92C_CR_SCHEDULE_EN 0x0020 #define R92C_CR_MACTXEN 0x0040 #define R92C_CR_MACRXEN 0x0080 #define R92C_CR_ENSWBCN 0x0100 #define R92C_CR_ENSEC 0x0200 #define R92C_CR_CALTMR_EN 0x0400 /* Bits for R92C_MSR. */ #define R92C_MSR_NOLINK 0x00 #define R92C_MSR_ADHOC 0x01 #define R92C_MSR_INFRA 0x02 #define R92C_MSR_AP 0x03 #define R92C_MSR_MASK (R92C_MSR_AP) /* Bits for R92C_PBP. */ #define R92C_PBP_PSRX_M 0x0f #define R92C_PBP_PSRX_S 0 #define R92C_PBP_PSTX_M 0xf0 #define R92C_PBP_PSTX_S 4 #define R92C_PBP_64 0 #define R92C_PBP_128 1 #define R92C_PBP_256 2 #define R92C_PBP_512 3 #define R92C_PBP_1024 4 /* Bits for R92C_TRXDMA_CTRL. 
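 *
 * The R92C_EFUSE_CTRL fields above implement a simple read handshake.
 * A sketch of how one efuse byte is usually fetched (the real routine
 * lives in the driver's efuse code; this is illustration only, and a
 * production loop bounds the poll with a retry limit):
 *
 *	reg = rtwn_read_4(sc, R92C_EFUSE_CTRL);
 *	reg = RW(reg, R92C_EFUSE_CTRL_ADDR, addr);
 *	reg &= ~R92C_EFUSE_CTRL_VALID;
 *	rtwn_write_4(sc, R92C_EFUSE_CTRL, reg);
 *	while (!(rtwn_read_4(sc, R92C_EFUSE_CTRL) & R92C_EFUSE_CTRL_VALID))
 *		rtwn_delay(sc, 5);
 *	val = MS(rtwn_read_4(sc, R92C_EFUSE_CTRL), R92C_EFUSE_CTRL_DATA);
 *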
*/ #define R92C_TRXDMA_CTRL_RX_SHIFT_EN 0x0002 #define R92C_TRXDMA_CTRL_RXDMA_AGG_EN 0x0004 #define R92C_TRXDMA_CTRL_TXDMA_VOQ_MAP_M 0x0030 #define R92C_TRXDMA_CTRL_TXDMA_VOQ_MAP_S 4 #define R92C_TRXDMA_CTRL_TXDMA_VIQ_MAP_M 0x00c0 #define R92C_TRXDMA_CTRL_TXDMA_VIQ_MAP_S 6 #define R92C_TRXDMA_CTRL_TXDMA_BEQ_MAP_M 0x0300 #define R92C_TRXDMA_CTRL_TXDMA_BEQ_MAP_S 8 #define R92C_TRXDMA_CTRL_TXDMA_BKQ_MAP_M 0x0c00 #define R92C_TRXDMA_CTRL_TXDMA_BKQ_MAP_S 10 #define R92C_TRXDMA_CTRL_TXDMA_MGQ_MAP_M 0x3000 #define R92C_TRXDMA_CTRL_TXDMA_MGQ_MAP_S 12 #define R92C_TRXDMA_CTRL_TXDMA_HIQ_MAP_M 0xc000 #define R92C_TRXDMA_CTRL_TXDMA_HIQ_MAP_S 14 #define R92C_TRXDMA_CTRL_QUEUE_LOW 1 #define R92C_TRXDMA_CTRL_QUEUE_NORMAL 2 #define R92C_TRXDMA_CTRL_QUEUE_HIGH 3 #define R92C_TRXDMA_CTRL_QMAP_M 0xfff0 /* Shortcuts. */ #define R92C_TRXDMA_CTRL_QMAP_3EP 0xf5b0 #define R92C_TRXDMA_CTRL_QMAP_HQ_LQ 0xf5f0 #define R92C_TRXDMA_CTRL_QMAP_HQ_NQ 0xfaf0 #define R92C_TRXDMA_CTRL_QMAP_LQ 0x5550 #define R92C_TRXDMA_CTRL_QMAP_NQ 0xaaa0 #define R92C_TRXDMA_CTRL_QMAP_HQ 0xfff0 /* Bits for R92C_C2H_EVT_CLEAR. */ #define R92C_C2H_EVT_HOST_CLOSE 0x00 #define R92C_C2H_EVT_FW_CLOSE 0xff /* Bits for R92C_LLT_INIT. */ #define R92C_LLT_INIT_DATA_M 0x000000ff #define R92C_LLT_INIT_DATA_S 0 #define R92C_LLT_INIT_ADDR_M 0x0000ff00 #define R92C_LLT_INIT_ADDR_S 8 #define R92C_LLT_INIT_OP_M 0xc0000000 #define R92C_LLT_INIT_OP_S 30 #define R92C_LLT_INIT_OP_NO_ACTIVE 0 #define R92C_LLT_INIT_OP_WRITE 1 /* Bits for R92C_RQPN. */ #define R92C_RQPN_HPQ_M 0x000000ff #define R92C_RQPN_HPQ_S 0 #define R92C_RQPN_LPQ_M 0x0000ff00 #define R92C_RQPN_LPQ_S 8 #define R92C_RQPN_PUBQ_M 0x00ff0000 #define R92C_RQPN_PUBQ_S 16 #define R92C_RQPN_LD 0x80000000 /* Bits for R92C_TDECTRL. */ #define R92C_TDECTRL_BLK_DESC_NUM_M 0x000000f0 #define R92C_TDECTRL_BLK_DESC_NUM_S 4 #define R92C_TDECTRL_BCN_VALID 0x00010000 /* Bits for R92C_TXDMA_OFFSET_CHK. */ #define R92C_TXDMA_OFFSET_DROP_DATA_EN 0x00000200 +/* Bits for R92C_AUTO_LLT. */ +#define R92C_AUTO_LLT_INIT 0x00010000 + /* Bits for R92C_FWHW_TXQ_CTRL. */ #define R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW 0x80 #define R92C_FWHW_TXQ_CTRL_REAL_BEACON 0x400000 /* Bits for R92C_SPEC_SIFS. */ #define R92C_SPEC_SIFS_CCK_M 0x00ff #define R92C_SPEC_SIFS_CCK_S 0 #define R92C_SPEC_SIFS_OFDM_M 0xff00 #define R92C_SPEC_SIFS_OFDM_S 8 /* Bits for R92C_RL. */ #define R92C_RL_LRL_M 0x003f #define R92C_RL_LRL_S 0 #define R92C_RL_SRL_M 0x3f00 #define R92C_RL_SRL_S 8 /* Size of R92C_DARFRC. */ #define R92C_DARFRC_SIZE 8 /* Bits for R92C_RRSR. */ #define R92C_RRSR_RATE_BITMAP_M 0x000fffff #define R92C_RRSR_RATE_BITMAP_S 0 #define R92C_RRSR_RATE_CCK_ONLY_1M 0xffff1 #define R92C_RRSR_RATE_ALL 0xfffff #define R92C_RRSR_RSC_LOWSUBCHNL 0x00200000 #define R92C_RRSR_RSC_UPSUBCHNL 0x00400000 #define R92C_RRSR_SHORT 0x00800000 /* Bits for R92C_EDCA_XX_PARAM. */ #define R92C_EDCA_PARAM_AIFS_M 0x000000ff #define R92C_EDCA_PARAM_AIFS_S 0 #define R92C_EDCA_PARAM_ECWMIN_M 0x00000f00 #define R92C_EDCA_PARAM_ECWMIN_S 8 #define R92C_EDCA_PARAM_ECWMAX_M 0x0000f000 #define R92C_EDCA_PARAM_ECWMAX_S 12 #define R92C_EDCA_PARAM_TXOP_M 0xffff0000 #define R92C_EDCA_PARAM_TXOP_S 16 /* Bits for R92C_HWSEQ_CTRL / R92C_TXPAUSE. */ #define R92C_TX_QUEUE_VO 0x01 #define R92C_TX_QUEUE_VI 0x02 #define R92C_TX_QUEUE_BE 0x04 #define R92C_TX_QUEUE_BK 0x08 #define R92C_TX_QUEUE_MGT 0x10 #define R92C_TX_QUEUE_HIGH 0x20 #define R92C_TX_QUEUE_BCN 0x40 /* Shortcuts. 
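 *
 * The R92C_RQPN fields above are loaded once during Tx DMA setup to
 * split the packet buffer pages between the queues; sketched below
 * (the page counts are per-chip constants, not part of this change):
 *
 *	rtwn_write_4(sc, R92C_RQPN,
 *	    SM(R92C_RQPN_HPQ, nhqpages) |
 *	    SM(R92C_RQPN_LPQ, nlqpages) |
 *	    SM(R92C_RQPN_PUBQ, npubqpages) |
 *	    R92C_RQPN_LD);
 *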
*/ #define R92C_TX_QUEUE_AC \ (R92C_TX_QUEUE_VO | R92C_TX_QUEUE_VI | \ R92C_TX_QUEUE_BE | R92C_TX_QUEUE_BK) #define R92C_TX_QUEUE_ALL \ (R92C_TX_QUEUE_AC | R92C_TX_QUEUE_MGT | \ R92C_TX_QUEUE_HIGH | R92C_TX_QUEUE_BCN | 0x80) /* XXX */ /* Bits for R92C_BCN_CTRL. */ #define R92C_BCN_CTRL_EN_MBSSID 0x02 #define R92C_BCN_CTRL_TXBCN_RPT 0x04 #define R92C_BCN_CTRL_EN_BCN 0x08 #define R92C_BCN_CTRL_DIS_TSF_UDT0 0x10 /* Bits for R92C_DUAL_TSF_RST. */ #define R92C_DUAL_TSF_RESET(id) (0x01 << (id)) #define R92C_DUAL_TSF_RST_TXOK 0x20 /* Bits for R92C_ACMHWCTRL. */ #define R92C_ACMHWCTRL_EN 0x01 #define R92C_ACMHWCTRL_BE 0x02 #define R92C_ACMHWCTRL_VI 0x04 #define R92C_ACMHWCTRL_VO 0x08 #define R92C_ACMHWCTRL_ACM_MASK 0x0f /* Bits for R92C_APSD_CTRL. */ #define R92C_APSD_CTRL_OFF 0x40 #define R92C_APSD_CTRL_OFF_STATUS 0x80 /* Bits for R92C_BWOPMODE. */ #define R92C_BWOPMODE_11J 0x01 #define R92C_BWOPMODE_5G 0x02 #define R92C_BWOPMODE_20MHZ 0x04 /* Bits for R92C_TCR. */ #define R92C_TCR_TSFRST 0x00000001 #define R92C_TCR_DIS_GCLK 0x00000002 #define R92C_TCR_PAD_SEL 0x00000004 #define R92C_TCR_PWR_ST 0x00000040 #define R92C_TCR_PWRBIT_OW_EN 0x00000080 #define R92C_TCR_ACRC 0x00000100 #define R92C_TCR_CFENDFORM 0x00000200 #define R92C_TCR_ICV 0x00000400 /* Bits for R92C_RCR. */ #define R92C_RCR_AAP 0x00000001 #define R92C_RCR_APM 0x00000002 #define R92C_RCR_AM 0x00000004 #define R92C_RCR_AB 0x00000008 #define R92C_RCR_ADD3 0x00000010 #define R92C_RCR_APWRMGT 0x00000020 #define R92C_RCR_CBSSID_DATA 0x00000040 #define R92C_RCR_CBSSID_BCN 0x00000080 #define R92C_RCR_ACRC32 0x00000100 #define R92C_RCR_AICV 0x00000200 #define R92C_RCR_ADF 0x00000800 #define R92C_RCR_ACF 0x00001000 #define R92C_RCR_AMF 0x00002000 #define R92C_RCR_HTC_LOC_CTRL 0x00004000 #define R92C_RCR_MFBEN 0x00400000 #define R92C_RCR_LSIGEN 0x00800000 #define R92C_RCR_ENMBID 0x01000000 #define R92C_RCR_APP_BA_SSN 0x08000000 #define R92C_RCR_APP_PHYSTS 0x10000000 #define R92C_RCR_APP_ICV 0x20000000 #define R92C_RCR_APP_MIC 0x40000000 #define R92C_RCR_APPFCS 0x80000000 /* Bits for R92C_RX_DRVINFO_SZ. */ /* XXX other values will not work */ #define R92C_RX_DRVINFO_SZ_DEF ((RTWN_PHY_STATUS_SIZE) / 8) /* Bits for R92C_WMAC_TRXPTCL_CTL. */ #define R92C_WMAC_TRXPTCL_SHPRE 0x00020000 /* Bits for R92C_CAMCMD. */ #define R92C_CAMCMD_ADDR_M 0x0000ffff #define R92C_CAMCMD_ADDR_S 0 #define R92C_CAMCMD_WRITE 0x00010000 #define R92C_CAMCMD_CLR 0x40000000 #define R92C_CAMCMD_POLLING 0x80000000 /* * CAM entries. */ #define R92C_CAM_CTL0(entry) ((entry) * 8 + 0) #define R92C_CAM_CTL1(entry) ((entry) * 8 + 1) #define R92C_CAM_KEY(entry, i) ((entry) * 8 + 2 + (i)) #define R92C_CAM_CTL6(entry) ((entry) * 8 + 6) #define R92C_CAM_CTL7(entry) ((entry) * 8 + 7) /* Bits for R92C_CAM_CTL0(i). */ #define R92C_CAM_KEYID_M 0x00000003 #define R92C_CAM_KEYID_S 0 #define R92C_CAM_ALGO_M 0x0000001c #define R92C_CAM_ALGO_S 2 #define R92C_CAM_ALGO_NONE 0 #define R92C_CAM_ALGO_WEP40 1 #define R92C_CAM_ALGO_TKIP 2 #define R92C_CAM_ALGO_AES 4 #define R92C_CAM_ALGO_WEP104 5 #define R92C_CAM_VALID 0x00008000 #define R92C_CAM_MACLO_M 0xffff0000 #define R92C_CAM_MACLO_S 16 /* Bits for R92C_SECCFG. */ #define R92C_SECCFG_TXUCKEY_DEF 0x0001 #define R92C_SECCFG_RXUCKEY_DEF 0x0002 #define R92C_SECCFG_TXENC_ENA 0x0004 #define R92C_SECCFG_RXDEC_ENA 0x0008 #define R92C_SECCFG_CMP_A2 0x0010 #define R92C_SECCFG_MC_SRCH_DIS 0x0020 #define R92C_SECCFG_TXBCKEY_DEF 0x0040 #define R92C_SECCFG_RXBCKEY_DEF 0x0080 /* Bits for R92C_RXFLTMAP*. 
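 *
 * A sketch of how the CAM entry/command macros above are driven when
 * a key is installed (mirroring the usual rtwn sequence; shown for
 * illustration): each 32-bit word of the entry is staged in
 * R92C_CAMWRITE and committed through R92C_CAMCMD.
 *
 *	rtwn_write_4(sc, R92C_CAMWRITE, data);
 *	rtwn_write_4(sc, R92C_CAMCMD,
 *	    R92C_CAMCMD_POLLING | R92C_CAMCMD_WRITE |
 *	    SM(R92C_CAMCMD_ADDR, R92C_CAM_KEY(entry, i)));
 *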
*/ #define R92C_RXFLTMAP_SUBTYPE(subtype) \ (1 << ((subtype) >> IEEE80211_FC0_SUBTYPE_SHIFT)) /* * Baseband registers. */ #define R92C_FPGA0_RFMOD 0x800 #define R92C_FPGA0_TXINFO 0x804 #define R92C_HSSI_PARAM1(chain) (0x820 + (chain) * 8) #define R92C_HSSI_PARAM2(chain) (0x824 + (chain) * 8) #define R92C_TXAGC_RATE18_06(i) (((i) == 0) ? 0xe00 : 0x830) #define R92C_TXAGC_RATE54_24(i) (((i) == 0) ? 0xe04 : 0x834) #define R92C_TXAGC_A_CCK1_MCS32 0xe08 #define R92C_TXAGC_B_CCK1_55_MCS32 0x838 #define R92C_TXAGC_B_CCK11_A_CCK2_11 0x86c #define R92C_TXAGC_MCS03_MCS00(i) (((i) == 0) ? 0xe10 : 0x83c) #define R92C_TXAGC_MCS07_MCS04(i) (((i) == 0) ? 0xe14 : 0x848) #define R92C_TXAGC_MCS11_MCS08(i) (((i) == 0) ? 0xe18 : 0x84c) #define R92C_TXAGC_MCS15_MCS12(i) (((i) == 0) ? 0xe1c : 0x868) #define R92C_LSSI_PARAM(chain) (0x840 + (chain) * 4) #define R92C_FPGA0_RFIFACEOE(chain) (0x860 + (chain) * 4) #define R92C_FPGA0_RFIFACESW(idx) (0x870 + (idx) * 4) #define R92C_FPGA0_RFPARAM(idx) (0x878 + (idx) * 4) #define R92C_FPGA0_ANAPARAM2 0x884 #define R92C_LSSI_READBACK(chain) (0x8a0 + (chain) * 4) #define R92C_HSPI_READBACK(chain) (0x8b8 + (chain) * 4) #define R92C_FPGA1_RFMOD 0x900 #define R92C_FPGA1_TXINFO 0x90c #define R92C_CCK0_SYSTEM 0xa00 #define R92C_CCK0_AFESETTING 0xa04 #define R92C_OFDM0_TRXPATHENA 0xc04 #define R92C_OFDM0_TRMUXPAR 0xc08 #define R92C_OFDM0_RXIQIMBALANCE(chain) (0xc14 + (chain) * 8) #define R92C_OFDM0_ECCATHRESHOLD 0xc4c #define R92C_OFDM0_AGCCORE1(chain) (0xc50 + (chain) * 8) #define R92C_OFDM0_AGCPARAM1 0xc70 #define R92C_OFDM0_AGCRSSITABLE 0xc78 #define R92C_OFDM0_TXIQIMBALANCE(chain) (0xc80 + (chain) * 8) #define R92C_OFDM0_TXAFE(chain) (0xc94 + (chain) * 8) #define R92C_OFDM0_RXIQEXTANTA 0xca0 +#define R92C_OFDM0_TXPSEUDONOISEWGT 0xce4 #define R92C_OFDM1_LSTF 0xd00 /* Bits for R92C_FPGA[01]_RFMOD. */ #define R92C_RFMOD_40MHZ 0x00000001 #define R92C_RFMOD_JAPAN 0x00000002 #define R92C_RFMOD_CCK_TXSC 0x00000030 #define R92C_RFMOD_CCK_EN 0x01000000 #define R92C_RFMOD_OFDM_EN 0x02000000 /* Bits for R92C_HSSI_PARAM1(i). */ #define R92C_HSSI_PARAM1_PI 0x00000100 /* Bits for R92C_HSSI_PARAM2(i). */ #define R92C_HSSI_PARAM2_CCK_HIPWR 0x00000200 #define R92C_HSSI_PARAM2_ADDR_LENGTH 0x00000400 #define R92C_HSSI_PARAM2_DATA_LENGTH 0x00000800 #define R92C_HSSI_PARAM2_READ_ADDR_M 0x7f800000 #define R92C_HSSI_PARAM2_READ_ADDR_S 23 #define R92C_HSSI_PARAM2_READ_EDGE 0x80000000 /* Bits for R92C_TXAGC_A_CCK1_MCS32. */ #define R92C_TXAGC_A_CCK1_M 0x0000ff00 #define R92C_TXAGC_A_CCK1_S 8 /* Bits for R92C_TXAGC_B_CCK11_A_CCK2_11. */ #define R92C_TXAGC_B_CCK11_M 0x000000ff #define R92C_TXAGC_B_CCK11_S 0 #define R92C_TXAGC_A_CCK2_M 0x0000ff00 #define R92C_TXAGC_A_CCK2_S 8 #define R92C_TXAGC_A_CCK55_M 0x00ff0000 #define R92C_TXAGC_A_CCK55_S 16 #define R92C_TXAGC_A_CCK11_M 0xff000000 #define R92C_TXAGC_A_CCK11_S 24 /* Bits for R92C_TXAGC_B_CCK1_55_MCS32. */ #define R92C_TXAGC_B_CCK1_M 0x0000ff00 #define R92C_TXAGC_B_CCK1_S 8 #define R92C_TXAGC_B_CCK2_M 0x00ff0000 #define R92C_TXAGC_B_CCK2_S 16 #define R92C_TXAGC_B_CCK55_M 0xff000000 #define R92C_TXAGC_B_CCK55_S 24 /* Bits for R92C_TXAGC_RATE18_06(x). */ #define R92C_TXAGC_RATE06_M 0x000000ff #define R92C_TXAGC_RATE06_S 0 #define R92C_TXAGC_RATE09_M 0x0000ff00 #define R92C_TXAGC_RATE09_S 8 #define R92C_TXAGC_RATE12_M 0x00ff0000 #define R92C_TXAGC_RATE12_S 16 #define R92C_TXAGC_RATE18_M 0xff000000 #define R92C_TXAGC_RATE18_S 24 /* Bits for R92C_TXAGC_RATE54_24(x). 
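 *
 * Example use of R92C_RXFLTMAP_SUBTYPE() above: a station that only
 * wants beacons and probe responses through the management-frame
 * filter could write
 *
 *	rtwn_write_2(sc, R92C_RXFLTMAP0,
 *	    R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_BEACON) |
 *	    R92C_RXFLTMAP_SUBTYPE(IEEE80211_FC0_SUBTYPE_PROBE_RESP));
 *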
*/ #define R92C_TXAGC_RATE24_M 0x000000ff #define R92C_TXAGC_RATE24_S 0 #define R92C_TXAGC_RATE36_M 0x0000ff00 #define R92C_TXAGC_RATE36_S 8 #define R92C_TXAGC_RATE48_M 0x00ff0000 #define R92C_TXAGC_RATE48_S 16 #define R92C_TXAGC_RATE54_M 0xff000000 #define R92C_TXAGC_RATE54_S 24 /* Bits for R92C_TXAGC_MCS03_MCS00(x). */ #define R92C_TXAGC_MCS00_M 0x000000ff #define R92C_TXAGC_MCS00_S 0 #define R92C_TXAGC_MCS01_M 0x0000ff00 #define R92C_TXAGC_MCS01_S 8 #define R92C_TXAGC_MCS02_M 0x00ff0000 #define R92C_TXAGC_MCS02_S 16 #define R92C_TXAGC_MCS03_M 0xff000000 #define R92C_TXAGC_MCS03_S 24 /* Bits for R92C_TXAGC_MCS07_MCS04(x). */ #define R92C_TXAGC_MCS04_M 0x000000ff #define R92C_TXAGC_MCS04_S 0 #define R92C_TXAGC_MCS05_M 0x0000ff00 #define R92C_TXAGC_MCS05_S 8 #define R92C_TXAGC_MCS06_M 0x00ff0000 #define R92C_TXAGC_MCS06_S 16 #define R92C_TXAGC_MCS07_M 0xff000000 #define R92C_TXAGC_MCS07_S 24 /* Bits for R92C_TXAGC_MCS11_MCS08(x). */ #define R92C_TXAGC_MCS08_M 0x000000ff #define R92C_TXAGC_MCS08_S 0 #define R92C_TXAGC_MCS09_M 0x0000ff00 #define R92C_TXAGC_MCS09_S 8 #define R92C_TXAGC_MCS10_M 0x00ff0000 #define R92C_TXAGC_MCS10_S 16 #define R92C_TXAGC_MCS11_M 0xff000000 #define R92C_TXAGC_MCS11_S 24 /* Bits for R92C_TXAGC_MCS15_MCS12(x). */ #define R92C_TXAGC_MCS12_M 0x000000ff #define R92C_TXAGC_MCS12_S 0 #define R92C_TXAGC_MCS13_M 0x0000ff00 #define R92C_TXAGC_MCS13_S 8 #define R92C_TXAGC_MCS14_M 0x00ff0000 #define R92C_TXAGC_MCS14_S 16 #define R92C_TXAGC_MCS15_M 0xff000000 #define R92C_TXAGC_MCS15_S 24 /* Bits for R92C_LSSI_PARAM(i). */ #define R92C_LSSI_PARAM_DATA_M 0x000fffff #define R92C_LSSI_PARAM_DATA_S 0 #define R92C_LSSI_PARAM_ADDR_M 0x03f00000 #define R92C_LSSI_PARAM_ADDR_S 20 /* Bits for R92C_FPGA0_RFIFACEOE(0). */ #define R92C_FPGA0_RFIFACEOE0_ANT_M 0x00000300 #define R92C_FPGA0_RFIFACEOE0_ANT_S 8 /* Bits for R92C_FPGA0_ANAPARAM2. */ #define R92C_FPGA0_ANAPARAM2_CBW20 0x00000400 /* Bits for R92C_LSSI_READBACK(i). */ #define R92C_LSSI_READBACK_DATA_M 0x000fffff #define R92C_LSSI_READBACK_DATA_S 0 + +/* Bits for R92C_CCK0_SYSTEM. */ +#define R92C_CCK0_SYSTEM_CCK_SIDEBAND 0x00000010 /* Bits for R92C_OFDM0_AGCCORE1(i). */ #define R92C_OFDM0_AGCCORE1_GAIN_M 0x0000007f #define R92C_OFDM0_AGCCORE1_GAIN_S 0 /* * RF (6052) registers. */ #define R92C_RF_AC 0x00 #define R92C_RF_IQADJ_G(i) (0x01 + (i)) #define R92C_RF_POW_TRSW 0x05 #define R92C_RF_GAIN_RX 0x06 #define R92C_RF_GAIN_TX 0x07 #define R92C_RF_TXM_IDAC 0x08 #define R92C_RF_BS_IQGEN 0x0f #define R92C_RF_MODE1 0x10 #define R92C_RF_MODE2 0x11 #define R92C_RF_RX_AGC_HP 0x12 #define R92C_RF_TX_AGC 0x13 #define R92C_RF_BIAS 0x14 #define R92C_RF_IPA 0x15 #define R92C_RF_POW_ABILITY 0x17 #define R92C_RF_CHNLBW 0x18 #define R92C_RF_RX_G1 0x1a #define R92C_RF_RX_G2 0x1b #define R92C_RF_RX_BB2 0x1c #define R92C_RF_RX_BB1 0x1d #define R92C_RF_RCK1 0x1e #define R92C_RF_RCK2 0x1f #define R92C_RF_TX_G(i) (0x20 + (i)) #define R92C_RF_TX_BB1 0x23 #define R92C_RF_T_METER 0x24 #define R92C_RF_SYN_G(i) (0x25 + (i)) #define R92C_RF_RCK_OS 0x30 #define R92C_RF_TXPA_G(i) (0x31 + (i)) /* Bits for R92C_RF_AC. */ #define R92C_RF_AC_MODE_M 0x70000 #define R92C_RF_AC_MODE_S 16 #define R92C_RF_AC_MODE_STANDBY 1 /* Bits for R92C_RF_CHNLBW. */ #define R92C_RF_CHNLBW_CHNL_M 0x003ff #define R92C_RF_CHNLBW_CHNL_S 0 #define R92C_RF_CHNLBW_BW20 0x00400 #define R92C_RF_CHNLBW_LCSTART 0x08000 /* Bits for R92C_RF_T_METER. 
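 *
 * The RF (6052) registers above are not directly memory-mapped; they
 * are reached serially through the LSSI interface.  A minimal sketch
 * of a direct RF write using the R92C_LSSI_PARAM fields (essentially
 * what r92c_rf_write() does):
 *
 *	rtwn_bb_write(sc, R92C_LSSI_PARAM(chain),
 *	    SM(R92C_LSSI_PARAM_ADDR, reg) |
 *	    SM(R92C_LSSI_PARAM_DATA, val));
 *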
*/ #define R92C_RF_T_METER_START 0x60 #define R92C_RF_T_METER_VAL_M 0x1f #define R92C_RF_T_METER_VAL_S 0 #endif /* R92C_REG_H */ Index: projects/clang400-import/sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c (revision 312720) @@ -1,246 +1,247 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2014 Kevin Lo * Copyright (c) 2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct rtwn_r92c_txpwr r92c_txpwr; void r92cu_attach(struct rtwn_usb_softc *); static void r92cu_postattach(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; struct ieee80211com *ic = &sc->sc_ic; if (!(rs->chip & R92C_CHIP_92C) && rs->board_type == R92C_BOARD_TYPE_HIGHPA) { sc->agc_prog = &rtl8188ru_agc[0]; sc->agc_size = nitems(rtl8188ru_agc); rs->rs_txagc = &rtl8188ru_txagc[0]; } else { sc->agc_prog = &rtl8192ce_agc[0]; sc->agc_size = nitems(rtl8192ce_agc); rs->rs_txagc = &rtl8192cu_txagc[0]; } if ((rs->chip & (R92C_CHIP_UMC_A_CUT | R92C_CHIP_92C)) == R92C_CHIP_UMC_A_CUT) { sc->fwname = "rtwn-rtl8192cfwU"; } else { sc->fwname = "rtwn-rtl8192cfwT"; } sc->fwsig = 0x88c; rs->rs_scan_start = ic->ic_scan_start; ic->ic_scan_start = r92c_scan_start; rs->rs_scan_end = ic->ic_scan_end; ic->ic_scan_end = r92c_scan_end; } static void r92cu_set_name(struct rtwn_softc *sc) { struct r92c_softc *rs = sc->sc_priv; if (!(rs->chip & R92C_CHIP_92C)) { if (rs->board_type == R92C_BOARD_TYPE_HIGHPA) sc->name = "RTL8188RU"; else if (rs->board_type == R92C_BOARD_TYPE_MINICARD) sc->name = "RTL8188CU-VAU"; else sc->name = "RTL8188CUS"; } else sc->name = "RTL8192CU"; } static void r92cu_attach_private(struct rtwn_softc *sc) { struct r92c_softc *rs; rs = malloc(sizeof(struct r92c_softc), M_RTWN_PRIV, M_WAITOK | M_ZERO); rs->rs_txpwr = &r92c_txpwr; rs->rs_set_bw20 = r92c_set_bw20; rs->rs_get_txpower = r92c_get_txpower; rs->rs_set_gain = r92c_set_gain; rs->rs_tx_enable_ampdu = r92c_tx_enable_ampdu; rs->rs_tx_setup_hwseq = r92c_tx_setup_hwseq; rs->rs_tx_setup_macid = r92c_tx_setup_macid; rs->rs_set_name = r92cu_set_name; #ifndef RTWN_WITHOUT_UCODE rs->rs_c2h_timeout = hz; callout_init_mtx(&rs->rs_c2h_report, &sc->sc_mtx, 0); #endif rs->rf_read_delay[0] = 10; rs->rf_read_delay[1] = 100; rs->rf_read_delay[2] = 10; sc->sc_priv = rs; } static void r92cu_adj_devcaps(struct 
rtwn_softc *sc) { /* XXX Currently broken / incomplete. */ sc->sc_ic.ic_caps &= ~IEEE80211_C_PMGT; } void r92cu_attach(struct rtwn_usb_softc *uc) { struct rtwn_softc *sc = &uc->uc_sc; /* USB part. */ uc->uc_align_rx = r92cu_align_rx; uc->tx_agg_desc_num = 6; /* Common part. */ sc->sc_flags = RTWN_FLAG_CAM_FIXED; sc->sc_set_chan = r92c_set_chan; sc->sc_fill_tx_desc = r92c_fill_tx_desc; sc->sc_fill_tx_desc_raw = r92c_fill_tx_desc_raw; sc->sc_fill_tx_desc_null = r92c_fill_tx_desc_null; sc->sc_dump_tx_desc = r92cu_dump_tx_desc; sc->sc_tx_radiotap_flags = r92c_tx_radiotap_flags; sc->sc_rx_radiotap_flags = r92c_rx_radiotap_flags; sc->sc_get_rx_stats = r92c_get_rx_stats; sc->sc_get_rssi_cck = r92c_get_rssi_cck; sc->sc_get_rssi_ofdm = r92c_get_rssi_ofdm; sc->sc_classify_intr = r92cu_classify_intr; sc->sc_handle_tx_report = rtwn_nop_softc_uint8_int; sc->sc_handle_c2h_report = rtwn_nop_softc_uint8_int; sc->sc_check_frame = rtwn_nop_int_softc_mbuf; sc->sc_rf_read = r92c_rf_read; sc->sc_rf_write = r92c_rf_write; sc->sc_check_condition = r92c_check_condition; sc->sc_efuse_postread = r92c_efuse_postread; sc->sc_parse_rom = r92c_parse_rom; sc->sc_set_led = r92cu_set_led; sc->sc_power_on = r92cu_power_on; sc->sc_power_off = r92cu_power_off; #ifndef RTWN_WITHOUT_UCODE sc->sc_fw_reset = r92c_fw_reset; sc->sc_fw_download_enable = r92c_fw_download_enable; #endif + sc->sc_llt_init = r92c_llt_init; sc->sc_set_page_size = r92c_set_page_size; sc->sc_lc_calib = r92c_lc_calib; sc->sc_iq_calib = r92c_iq_calib; /* XXX TODO */ sc->sc_read_chipid_vendor = r92c_read_chipid_vendor; sc->sc_adj_devcaps = r92cu_adj_devcaps; sc->sc_vap_preattach = rtwn_nop_softc_vap; sc->sc_postattach = r92cu_postattach; sc->sc_detach_private = r92c_detach_private; sc->sc_set_media_status = r92c_joinbss_rpt; #ifndef RTWN_WITHOUT_UCODE sc->sc_set_rsvd_page = r92c_set_rsvd_page; sc->sc_set_pwrmode = r92c_set_pwrmode; sc->sc_set_rssi = r92c_set_rssi; #endif sc->sc_beacon_init = r92c_beacon_init; sc->sc_beacon_enable = r92c_beacon_enable; sc->sc_beacon_set_rate = rtwn_nop_void_int; sc->sc_beacon_select = rtwn_nop_softc_int; sc->sc_temp_measure = r92c_temp_measure; sc->sc_temp_read = r92c_temp_read; sc->sc_init_tx_agg = r92cu_init_tx_agg; sc->sc_init_rx_agg = r92cu_init_rx_agg; sc->sc_init_ampdu = r92c_init_ampdu; sc->sc_init_intr = r92cu_init_intr; sc->sc_init_edca = r92c_init_edca; sc->sc_init_bb = r92cu_init_bb; sc->sc_init_rf = r92c_init_rf; sc->sc_init_antsel = r92c_init_antsel; sc->sc_post_init = r92cu_post_init; sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc; sc->mac_prog = &rtl8192cu_mac[0]; sc->mac_size = nitems(rtl8192cu_mac); sc->bb_prog = &rtl8192cu_bb[0]; sc->bb_size = nitems(rtl8192cu_bb); sc->rf_prog = &rtl8192c_rf[0]; sc->page_count = R92CU_TX_PAGE_COUNT; sc->pktbuf_count = R92C_TXPKTBUF_COUNT; sc->ackto = 0x40; sc->npubqpages = R92CU_PUBQ_NPAGES; sc->page_size = R92C_TX_PAGE_SIZE; sc->txdesc_len = sizeof(struct r92cu_tx_desc); sc->efuse_maxlen = R92C_EFUSE_MAX_LEN; sc->efuse_maplen = R92C_EFUSE_MAP_LEN; sc->rx_dma_size = R92C_RX_DMA_BUFFER_SIZE; sc->macid_limit = R92C_MACID_MAX + 1; sc->cam_entry_limit = R92C_CAM_ENTRY_COUNT; sc->fwsize_limit = R92C_MAX_FW_SIZE; sc->temp_delta = R92C_CALIB_THRESHOLD; sc->bcn_status_reg[0] = R92C_TDECTRL; sc->bcn_status_reg[1] = R92C_TDECTRL; sc->rcr = 0; r92cu_attach_private(sc); } Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e.h (nonexistent) +++ 
projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e.h (revision 312720) @@ -0,0 +1,82 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef RTL8192E_H +#define RTL8192E_H + +/* + * Global definitions. + */ +#define R92E_PUBQ_NPAGES 222 +#define R92E_TX_PAGE_COUNT 243 + +#define R92E_TX_PAGE_SIZE 256 +#define R92E_RX_DMA_BUFFER_SIZE 0x3d00 + +#define R92E_MAX_FW_SIZE 0x8000 + + +/* + * Function declarations. + */ +/* r92e_attach.c */ +void r92e_detach_private(struct rtwn_softc *); + +/* r92e_chan.c */ +void r92e_set_chan(struct rtwn_softc *, struct ieee80211_channel *); + +/* r92e_fw.c */ +#ifndef RTWN_WITHOUT_UCODE +void r92e_fw_reset(struct rtwn_softc *, int); +void r92e_set_media_status(struct rtwn_softc *, int); +int r92e_set_pwrmode(struct rtwn_softc *, struct ieee80211vap *, int); +#endif + +/* r92e_init.c */ +int r92e_llt_init(struct rtwn_softc *); +void r92e_init_bb(struct rtwn_softc *); +void r92e_init_rf(struct rtwn_softc *); +int r92e_power_on(struct rtwn_softc *); +void r92e_power_off(struct rtwn_softc *); + +/* r92e_led.c */ +void r92e_set_led(struct rtwn_softc *, int, int); + +/* r92e_rf.c */ +uint32_t r92e_rf_read(struct rtwn_softc *, int, uint8_t); +void r92e_rf_write(struct rtwn_softc *, int, uint8_t, uint32_t); + +/* r92e_rom.c */ +void r92e_parse_rom_common(struct rtwn_softc *, uint8_t *); +void r92e_parse_rom(struct rtwn_softc *, uint8_t *); + +/* r92e_rx.c */ +void r92e_handle_c2h_report(struct rtwn_softc *, uint8_t *, int); +int8_t r92e_get_rssi_cck(struct rtwn_softc *, void *); + +#endif /* RTL8192E_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_chan.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_chan.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_chan.c (revision 
312720) @@ -0,0 +1,294 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include + +static int +r92e_get_power_group(struct rtwn_softc *sc, struct ieee80211_channel *c) +{ + uint8_t chan; + int group; + + chan = rtwn_chan2centieee(c); + if (IEEE80211_IS_CHAN_2GHZ(c)) { + if (chan <= 2) group = 0; + else if (chan <= 5) group = 1; + else if (chan <= 8) group = 2; + else if (chan <= 11) group = 3; + else if (chan <= 14) group = 4; + else { + KASSERT(0, ("wrong 2GHz channel %d!\n", chan)); + return (-1); + } + } else { + KASSERT(0, ("wrong channel band (flags %08X)\n", c->ic_flags)); + return (-1); + } + + return (group); +} + +static void +r92e_get_txpower(struct rtwn_softc *sc, int chain, struct ieee80211_channel *c, + uint8_t power[RTWN_RIDX_COUNT]) +{ + struct r92e_softc *rs = sc->sc_priv; + int i, ridx, group, max_mcs; + + /* Determine channel group. */ + group = r92e_get_power_group(sc, c); + if (group == -1) { /* shouldn't happen */ + device_printf(sc->sc_dev, "%s: incorrect channel\n", __func__); + return; + } + + max_mcs = RTWN_RIDX_MCS(sc->ntxchains * 8 - 1); + + /* XXX regulatory */ + /* XXX net80211 regulatory */ + + for (ridx = RTWN_RIDX_CCK1; ridx <= RTWN_RIDX_CCK11; ridx++) + power[ridx] = rs->cck_tx_pwr[chain][group]; + for (ridx = RTWN_RIDX_OFDM6; ridx <= max_mcs; ridx++) + power[ridx] = rs->ht40_tx_pwr_2g[chain][group]; + + for (ridx = RTWN_RIDX_OFDM6; ridx <= RTWN_RIDX_OFDM54; ridx++) + power[ridx] += rs->ofdm_tx_pwr_diff_2g[chain][0]; + + for (i = 0; i < sc->ntxchains; i++) { + uint8_t min_mcs; + uint8_t pwr_diff; + + if (IEEE80211_IS_CHAN_HT40(c)) + pwr_diff = rs->bw40_tx_pwr_diff_2g[chain][i]; + else + pwr_diff = rs->bw20_tx_pwr_diff_2g[chain][i]; + + min_mcs = RTWN_RIDX_MCS(i * 8); + for (ridx = min_mcs; ridx <= max_mcs; ridx++) + power[ridx] += pwr_diff; + + } + + /* Apply max limit. 
*/ + for (ridx = RTWN_RIDX_CCK1; ridx <= max_mcs; ridx++) { + if (power[ridx] > R92C_MAX_TX_PWR) + power[ridx] = R92C_MAX_TX_PWR; + } + +#ifdef RTWN_DEBUG + if (sc->sc_debug & RTWN_DEBUG_TXPWR) { + /* Dump per-rate Tx power values. */ + printf("Tx power for chain %d:\n", chain); + for (ridx = RTWN_RIDX_CCK1; ridx < RTWN_RIDX_COUNT; ridx++) + printf("Rate %d = %u\n", ridx, power[ridx]); + } +#endif +} + + +static void +r92e_write_txpower(struct rtwn_softc *sc, int chain, + uint8_t power[RTWN_RIDX_COUNT]) +{ + uint32_t reg; + + /* Write per-CCK rate Tx power. */ + if (chain == 0) { + reg = rtwn_bb_read(sc, R92C_TXAGC_A_CCK1_MCS32); + reg = RW(reg, R92C_TXAGC_A_CCK1, power[RTWN_RIDX_CCK1]); + rtwn_bb_write(sc, R92C_TXAGC_A_CCK1_MCS32, reg); + reg = rtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); + reg = RW(reg, R92C_TXAGC_A_CCK2, power[RTWN_RIDX_CCK2]); + reg = RW(reg, R92C_TXAGC_A_CCK55, power[RTWN_RIDX_CCK55]); + reg = RW(reg, R92C_TXAGC_A_CCK11, power[RTWN_RIDX_CCK11]); + rtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); + } else { + reg = rtwn_bb_read(sc, R92C_TXAGC_B_CCK1_55_MCS32); + reg = RW(reg, R92C_TXAGC_B_CCK1, power[RTWN_RIDX_CCK1]); + reg = RW(reg, R92C_TXAGC_B_CCK2, power[RTWN_RIDX_CCK2]); + reg = RW(reg, R92C_TXAGC_B_CCK55, power[RTWN_RIDX_CCK55]); + rtwn_bb_write(sc, R92C_TXAGC_B_CCK1_55_MCS32, reg); + reg = rtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11); + reg = RW(reg, R92C_TXAGC_B_CCK11, power[RTWN_RIDX_CCK11]); + rtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg); + } + /* Write per-OFDM rate Tx power. */ + rtwn_bb_write(sc, R92C_TXAGC_RATE18_06(chain), + SM(R92C_TXAGC_RATE06, power[RTWN_RIDX_OFDM6]) | + SM(R92C_TXAGC_RATE09, power[RTWN_RIDX_OFDM9]) | + SM(R92C_TXAGC_RATE12, power[RTWN_RIDX_OFDM12]) | + SM(R92C_TXAGC_RATE18, power[RTWN_RIDX_OFDM18])); + rtwn_bb_write(sc, R92C_TXAGC_RATE54_24(chain), + SM(R92C_TXAGC_RATE24, power[RTWN_RIDX_OFDM24]) | + SM(R92C_TXAGC_RATE36, power[RTWN_RIDX_OFDM36]) | + SM(R92C_TXAGC_RATE48, power[RTWN_RIDX_OFDM48]) | + SM(R92C_TXAGC_RATE54, power[RTWN_RIDX_OFDM54])); + /* Write per-MCS Tx power. */ + rtwn_bb_write(sc, R92C_TXAGC_MCS03_MCS00(chain), + SM(R92C_TXAGC_MCS00, power[RTWN_RIDX_MCS(0)]) | + SM(R92C_TXAGC_MCS01, power[RTWN_RIDX_MCS(1)]) | + SM(R92C_TXAGC_MCS02, power[RTWN_RIDX_MCS(2)]) | + SM(R92C_TXAGC_MCS03, power[RTWN_RIDX_MCS(3)])); + rtwn_bb_write(sc, R92C_TXAGC_MCS07_MCS04(chain), + SM(R92C_TXAGC_MCS04, power[RTWN_RIDX_MCS(4)]) | + SM(R92C_TXAGC_MCS05, power[RTWN_RIDX_MCS(5)]) | + SM(R92C_TXAGC_MCS06, power[RTWN_RIDX_MCS(6)]) | + SM(R92C_TXAGC_MCS07, power[RTWN_RIDX_MCS(7)])); + if (sc->ntxchains >= 2) { + rtwn_bb_write(sc, R92C_TXAGC_MCS11_MCS08(chain), + SM(R92C_TXAGC_MCS08, power[RTWN_RIDX_MCS(8)]) | + SM(R92C_TXAGC_MCS09, power[RTWN_RIDX_MCS(9)]) | + SM(R92C_TXAGC_MCS10, power[RTWN_RIDX_MCS(10)]) | + SM(R92C_TXAGC_MCS11, power[RTWN_RIDX_MCS(11)])); + rtwn_bb_write(sc, R92C_TXAGC_MCS15_MCS12(chain), + SM(R92C_TXAGC_MCS12, power[RTWN_RIDX_MCS(12)]) | + SM(R92C_TXAGC_MCS13, power[RTWN_RIDX_MCS(13)]) | + SM(R92C_TXAGC_MCS14, power[RTWN_RIDX_MCS(14)]) | + SM(R92C_TXAGC_MCS15, power[RTWN_RIDX_MCS(15)])); + } +} + +static void +r92e_set_txpower(struct rtwn_softc *sc, struct ieee80211_channel *c) +{ + uint8_t power[RTWN_RIDX_COUNT]; + int i; + + for (i = 0; i < sc->ntxchains; i++) { + memset(power, 0, sizeof(power)); + /* Compute per-rate Tx power values. */ + r92e_get_txpower(sc, i, c, power); + /* Write per-rate Tx power values to hardware. 
*/ + r92e_write_txpower(sc, i, power); + } +} + +static void +r92e_set_bw40(struct rtwn_softc *sc, uint8_t chan, int prichlo) +{ + int i; + + rtwn_setbits_2(sc, R92C_WMAC_TRXPTCL_CTL, 0x100, 0x80); + rtwn_write_1(sc, R12A_DATA_SEC, + prichlo ? R12A_DATA_SEC_PRIM_DOWN_20 : R12A_DATA_SEC_PRIM_UP_20); + + rtwn_bb_setbits(sc, R92C_FPGA0_RFMOD, 0, R92C_RFMOD_40MHZ); + rtwn_bb_setbits(sc, R92C_FPGA1_RFMOD, 0, R92C_RFMOD_40MHZ); + + /* Select 40MHz bandwidth. */ + for (i = 0; i < sc->nrxchains; i++) + rtwn_rf_setbits(sc, i, R92C_RF_CHNLBW, + R88E_RF_CHNLBW_BW20, 0x400); + + /* Set CCK side band. */ + rtwn_bb_setbits(sc, R92C_CCK0_SYSTEM, + R92C_CCK0_SYSTEM_CCK_SIDEBAND, (prichlo ? 0 : 1) << 4); + + rtwn_bb_setbits(sc, R92C_OFDM1_LSTF, 0x0c00, (prichlo ? 1 : 2) << 10); + + rtwn_bb_setbits(sc, R92C_FPGA0_ANAPARAM2, + R92C_FPGA0_ANAPARAM2_CBW20, 0); + + rtwn_bb_setbits(sc, 0x818, 0x0c000000, (prichlo ? 2 : 1) << 26); +} + +static void +r92e_set_bw20(struct rtwn_softc *sc, uint8_t chan) +{ + int i; + + rtwn_setbits_2(sc, R92C_WMAC_TRXPTCL_CTL, 0x180, 0); + rtwn_write_1(sc, R12A_DATA_SEC, R12A_DATA_SEC_NO_EXT); + + rtwn_bb_setbits(sc, R92C_FPGA0_RFMOD, R92C_RFMOD_40MHZ, 0); + rtwn_bb_setbits(sc, R92C_FPGA1_RFMOD, R92C_RFMOD_40MHZ, 0); + + /* Select 20MHz bandwidth. */ + for (i = 0; i < sc->nrxchains; i++) + rtwn_rf_setbits(sc, i, R92C_RF_CHNLBW, + R88E_RF_CHNLBW_BW20, 0xc00); + + rtwn_bb_setbits(sc, R92C_OFDM0_TXPSEUDONOISEWGT, 0xc0000000, 0); +} + +void +r92e_set_chan(struct rtwn_softc *sc, struct ieee80211_channel *c) +{ + struct r92e_softc *rs = sc->sc_priv; + u_int chan; + int i; + + chan = rtwn_chan2centieee(c); + + for (i = 0; i < sc->nrxchains; i++) { + rtwn_rf_write(sc, i, R92C_RF_CHNLBW, + RW(rs->rf_chnlbw[0], R92C_RF_CHNLBW_CHNL, chan)); + } + + if (IEEE80211_IS_CHAN_HT40(c)) + r92e_set_bw40(sc, chan, IEEE80211_IS_CHAN_HT40U(c)); + else + r92e_set_bw20(sc, chan); + + /* Set Tx power for this new channel. */ + r92e_set_txpower(sc, c); +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_chan.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_fw.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_fw.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_fw.c (revision 312720) @@ -0,0 +1,137 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include + +#include + +#include + +#ifndef RTWN_WITHOUT_UCODE +void +r92e_fw_reset(struct rtwn_softc *sc, int reason) +{ + /* Reset MCU IO wrapper. */ + rtwn_setbits_1(sc, R92C_RSV_CTRL + 1, 0x01, 0); + + rtwn_setbits_1_shift(sc, R92C_SYS_FUNC_EN, + R92C_SYS_FUNC_EN_CPUEN, 0, 1); + + /* Enable MCU IO wrapper. */ + rtwn_setbits_1(sc, R92C_RSV_CTRL + 1, 0, 0x01); + + rtwn_setbits_1_shift(sc, R92C_SYS_FUNC_EN, + 0, R92C_SYS_FUNC_EN_CPUEN, 1); +} + +void +r92e_set_media_status(struct rtwn_softc *sc, int macid) +{ + struct r88e_fw_cmd_msrrpt status; + + if (macid & RTWN_MACID_VALID) + status.msrb0 = R88E_MSRRPT_B0_ASSOC; + else + status.msrb0 = R88E_MSRRPT_B0_DISASSOC; + status.macid = (macid & ~RTWN_MACID_VALID); + + if (r88e_fw_cmd(sc, R88E_CMD_MSR_RPT, &status, sizeof(status)) != 0) { + device_printf(sc->sc_dev, "%s: cannot change media status!\n", + __func__); + } +} + +int +r92e_set_pwrmode(struct rtwn_softc *sc, struct ieee80211vap *vap, int off) +{ + struct r12a_fw_cmd_pwrmode mode; + int error; + + if (off && vap->iv_state == IEEE80211_S_RUN && + (vap->iv_flags & IEEE80211_F_PMGTON)) { + mode.mode = R88E_PWRMODE_LEG; + /* + * TODO: switch to RFOFF state + * (something is missing here - Rx stops with it). + */ +#ifdef RTWN_TODO + mode.pwr_state = R88E_PWRMODE_STATE_RFOFF; +#else + mode.pwr_state = R88E_PWRMODE_STATE_RFON; +#endif + } else { + mode.mode = R88E_PWRMODE_CAM; + mode.pwr_state = R88E_PWRMODE_STATE_ALLON; + } + mode.pwrb1 = + SM(R88E_PWRMODE_B1_SMART_PS, R88E_PWRMODE_B1_LEG_NULLDATA) | + SM(R88E_PWRMODE_B1_RLBM, R88E_PWRMODE_B1_MODE_MIN); + /* XXX ignored */ + mode.bcn_pass = 0; + mode.queue_uapsd = 0; + mode.pwrb5 = 0; + error = r88e_fw_cmd(sc, R88E_CMD_SET_PWRMODE, &mode, sizeof(mode)); + if (error != 0) { + device_printf(sc->sc_dev, + "%s: CMD_SET_PWRMODE was not sent, error %d\n", + __func__, error); + } + + return (error); +} +#endif Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_fw.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_init.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_init.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_init.c (revision 312720) @@ -0,0 +1,385 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include + +#include +#include +#include +#include + +int +r92e_llt_init(struct rtwn_softc *sc) +{ + int ntries, error; + + error = rtwn_setbits_4(sc, R92C_AUTO_LLT, 0, R92C_AUTO_LLT_INIT); + if (error != 0) + return (error); + for (ntries = 0; ntries < 1000; ntries++) { + if (!(rtwn_read_4(sc, R92C_AUTO_LLT) & R92C_AUTO_LLT_INIT)) + return (0); + rtwn_delay(sc, 1); + } + return (ETIMEDOUT); +} + +static void +r92e_crystalcap_write(struct rtwn_softc *sc) +{ + struct r92e_softc *rs = sc->sc_priv; + uint32_t reg; + uint8_t val; + + val = rs->crystalcap & 0x3f; + reg = rtwn_bb_read(sc, R92E_AFE_XTAL_CTRL); + rtwn_bb_write(sc, R92E_AFE_XTAL_CTRL, + RW(reg, R92E_AFE_XTAL_CTRL_ADDR, val | val << 6)); + rtwn_bb_write(sc, R92C_AFE_XTAL_CTRL, 0x000f81fb); +} + +void +r92e_init_bb(struct rtwn_softc *sc) +{ + int i, j; + + rtwn_setbits_2(sc, R92C_SYS_FUNC_EN, 0, + R92C_SYS_FUNC_EN_USBA | R92C_SYS_FUNC_EN_USBD); + + /* Enable BB and RF. */ + rtwn_setbits_2(sc, R92C_SYS_FUNC_EN, 0, + R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST | + R92C_SYS_FUNC_EN_DIO_RF); + + /* PathA RF Power On. */ + rtwn_write_1(sc, R92C_RF_CTRL, + R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB); + + /* Write BB initialization values. */ + for (i = 0; i < sc->bb_size; i++) { + const struct rtwn_bb_prog *bb_prog = &sc->bb_prog[i]; + + while (!rtwn_check_condition(sc, bb_prog->cond)) { + KASSERT(bb_prog->next != NULL, + ("%s: wrong condition value (i %d)\n", + __func__, i)); + bb_prog = bb_prog->next; + } + + for (j = 0; j < bb_prog->count; j++) { + RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, + "BB: reg 0x%03x, val 0x%08x\n", + bb_prog->reg[j], bb_prog->val[j]); + + rtwn_bb_write(sc, bb_prog->reg[j], bb_prog->val[j]); + rtwn_delay(sc, 1); + } + } + + /* Write AGC values. 
*/ + for (i = 0; i < sc->agc_size; i++) { + const struct rtwn_agc_prog *agc_prog = &sc->agc_prog[i]; + + while (!rtwn_check_condition(sc, agc_prog->cond)) { + KASSERT(agc_prog->next != NULL, + ("%s: wrong condition value (2) (i %d)\n", + __func__, i)); + agc_prog = agc_prog->next; + } + + for (j = 0; j < agc_prog->count; j++) { + RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, + "AGC: val 0x%08x\n", agc_prog->val[j]); + + rtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, + agc_prog->val[j]); + rtwn_delay(sc, 1); + } + } + + if (rtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) & R92C_HSSI_PARAM2_CCK_HIPWR) + sc->sc_flags |= RTWN_FLAG_CCK_HIPWR; + + rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x00040022); + rtwn_delay(sc, 1); + rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x00040020); + rtwn_delay(sc, 1); + + r92e_crystalcap_write(sc); +} + +void +r92e_init_rf(struct rtwn_softc *sc) +{ + struct r92e_softc *rs = sc->sc_priv; + uint32_t reg, type; + int i, chain, idx, off; + + for (chain = 0, i = 0; chain < sc->nrxchains; chain++, i++) { + /* Save RF_ENV control type. */ + idx = chain / 2; + off = (chain % 2) * 16; + reg = rtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx)); + type = (reg >> off) & 0x10; + + /* Set RF_ENV enable. */ + rtwn_bb_setbits(sc, R92C_FPGA0_RFIFACEOE(chain), + 0, 0x100000); + rtwn_delay(sc, 1); + /* Set RF_ENV output high. */ + rtwn_bb_setbits(sc, R92C_FPGA0_RFIFACEOE(chain), + 0, 0x10); + rtwn_delay(sc, 1); + /* Set address and data lengths of RF registers. */ + rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(chain), + R92C_HSSI_PARAM2_ADDR_LENGTH, 0); + rtwn_delay(sc, 1); + rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(chain), + R92C_HSSI_PARAM2_DATA_LENGTH, 0); + rtwn_delay(sc, 1); + + /* Write RF initialization values for this chain. */ + i += r92c_init_rf_chain(sc, &sc->rf_prog[i], chain); + + /* Cache RF register CHNLBW. */ + rs->rf_chnlbw[chain] = rtwn_rf_read(sc, chain, R92C_RF_CHNLBW); + } + + /* Turn CCK and OFDM blocks on. */ + rtwn_bb_setbits(sc, R92C_FPGA0_RFMOD, 0, R92C_RFMOD_CCK_EN); + rtwn_bb_setbits(sc, R92C_FPGA0_RFMOD, 0, R92C_RFMOD_OFDM_EN); +} + +static void +r92e_adj_crystal(struct rtwn_softc *sc) +{ + + rtwn_setbits_1(sc, R92C_AFE_PLL_CTRL, R92C_AFE_PLL_CTRL_FREF_SEL, 0); + rtwn_setbits_4(sc, R92E_APE_PLL_CTRL_EXT, 0x00000380, 0); + rtwn_setbits_1(sc, R92C_AFE_PLL_CTRL, 0x40, 0); + rtwn_setbits_4(sc, R92E_APE_PLL_CTRL_EXT, 0x00200000, 0); +} + +int +r92e_power_on(struct rtwn_softc *sc) +{ +#define RTWN_CHK(res) do { \ + if (res != 0) \ + return (EIO); \ +} while(0) + int ntries; + + if (rtwn_read_4(sc, R92C_SYS_CFG) & R92C_SYS_CFG_TRP_BT_EN) + RTWN_CHK(rtwn_write_1(sc, R92C_LDO_SWR_CTRL, 0xc3)); + else { + RTWN_CHK(rtwn_setbits_4(sc, R92E_LDOV12_CTRL, 0x00100000, + 0x00500000)); + RTWN_CHK(rtwn_write_1(sc, R92C_LDO_SWR_CTRL, 0x83)); + } + + r92e_adj_crystal(sc); + + /* Enable WL suspend. */ + RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, + R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE, 0, 1)); + + /* Disable HWPDN, SW LPS and WL suspend. */ + RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, + R92C_APS_FSMCO_APFM_RSM | R92C_APS_FSMCO_AFSM_HSUS | + R92C_APS_FSMCO_AFSM_PCIE | R92C_APS_FSMCO_APDM_HPDN, 0, 1)); + + /* Wait for power ready bit. */ + for (ntries = 0; ntries < 5000; ntries++) { + if (rtwn_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST) + break; + rtwn_delay(sc, 10); + } + if (ntries == 5000) { + device_printf(sc->sc_dev, + "timeout waiting for chip power up\n"); + return (ETIMEDOUT); + } + + /* Release WLON reset. 
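 */

/*
 * Note the polling idiom in r92e_power_on() above: up to 5000 reads
 * with rtwn_delay(sc, 10) between them, roughly 50 ms worst case
 * assuming the delay argument is in microseconds.  A self-contained
 * sketch of the same pattern; the names and the simulated register
 * are illustrative, not from this commit.
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int nreads;

/* Simulated hardware: the "ready" bit rises on the third read. */
static uint32_t
reg_read(void)
{
	return (++nreads >= 3 ? 0x00020000 : 0);
}

static void
delay_us(unsigned us)
{
	(void)us;		/* a real driver would sleep here */
}

/* Poll until (reg & bits) != 0, as the SUS_HOST wait above does. */
static int
poll_bits_set(uint32_t bits)
{
	int ntries;

	for (ntries = 0; ntries < 5000; ntries++) {
		if (reg_read() & bits)
			return (0);
		delay_us(10);
	}
	return (ETIMEDOUT);	/* ~50 ms elapsed, bit never rose */
}

int
main(void)
{
	printf("%d\n", poll_bits_set(0x00020000));	/* 0 */
	return (0);
}

/* Release WLON reset: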
*/ + RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, + R92C_APS_FSMCO_RDY_MACON, 2)); + + RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, + R92C_APS_FSMCO_APFM_ONMAC, 1)); + for (ntries = 0; ntries < 5000; ntries++) { + if (!(rtwn_read_2(sc, R92C_APS_FSMCO) & + R92C_APS_FSMCO_APFM_ONMAC)) + break; + rtwn_delay(sc, 10); + } + if (ntries == 5000) + return (ETIMEDOUT); + + /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */ + RTWN_CHK(rtwn_write_2(sc, R92C_CR, 0)); + RTWN_CHK(rtwn_setbits_2(sc, R92C_CR, 0, + R92C_CR_HCI_TXDMA_EN | R92C_CR_TXDMA_EN | + R92C_CR_HCI_RXDMA_EN | R92C_CR_RXDMA_EN | + R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN | + ((sc->sc_hwcrypto != RTWN_CRYPTO_SW) ? R92C_CR_ENSEC : 0) | + R92C_CR_CALTMR_EN)); + + return (0); +} + +void +r92e_power_off(struct rtwn_softc *sc) +{ + int error, ntries; + + /* Stop Rx. */ + error = rtwn_write_1(sc, R92C_CR, 0); + if (error == ENXIO) /* hardware gone */ + return; + + /* Move card to Low Power state. */ + /* Block all Tx queues. */ + rtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); + + for (ntries = 0; ntries < 5000; ntries++) { + /* Should be zero if no packet is transmitting. */ + if (rtwn_read_4(sc, R88E_SCH_TXCMD) == 0) + break; + + rtwn_delay(sc, 10); + } + if (ntries == 5000) { + device_printf(sc->sc_dev, "%s: failed to block Tx queues\n", + __func__); + return; + } + + /* CCK and OFDM are disabled, and clock are gated. */ + rtwn_setbits_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_BBRSTB, 0); + + rtwn_delay(sc, 1); + + /* Reset whole BB. */ + rtwn_setbits_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_BB_GLB_RST, 0); + + /* Reset MAC TRX. */ + rtwn_write_1(sc, R92C_CR, + R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN); + + /* Check if removed later. */ + rtwn_setbits_1_shift(sc, R92C_CR, R92C_CR_ENSEC, 0, 1); + + /* Respond TxOK to scheduler */ + rtwn_setbits_1(sc, R92C_DUAL_TSF_RST, 0, R92C_DUAL_TSF_RST_TXOK); + + /* Reset MCU. */ + rtwn_write_1(sc, R92C_MCUFWDL, 0); + +#ifndef RTWN_WITHOUT_UCODE + /* Reset MCU IO wrapper. */ + rtwn_setbits_1(sc, R92C_RSV_CTRL + 1, 0x01, 0); + + rtwn_setbits_1_shift(sc, R92C_SYS_FUNC_EN, + R92C_SYS_FUNC_EN_CPUEN, 0, 1); + + /* Enable MCU IO wrapper. */ + rtwn_setbits_1(sc, R92C_RSV_CTRL + 1, 0, 0x01); +#endif + + /* Move card to Disabled state. */ + /* Turn off RF. */ + rtwn_write_1(sc, R92C_RF_CTRL, 0); + + /* Switch DPDT_SEL_P output. */ + rtwn_setbits_1(sc, R92C_LEDCFG2, 0x80, 0); + + /* Turn off MAC by HW state machine */ + rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, R92C_APS_FSMCO_APFM_OFF, + 1); + + for (ntries = 0; ntries < 5000; ntries++) { + /* Wait until it will be disabled. */ + if ((rtwn_read_2(sc, R92C_APS_FSMCO) & + R92C_APS_FSMCO_APFM_OFF) == 0) + break; + + rtwn_delay(sc, 10); + } + if (ntries == 5000) { + device_printf(sc->sc_dev, "%s: could not turn off MAC\n", + __func__); + return; + } + + /* SOP option to disable BG/MB. */ + rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0xff, + R92C_APS_FSMCO_SOP_RCK, 3); + + /* Unlock small LDO Register. */ + rtwn_setbits_1(sc, 0xcc, 0, 0x4); + + /* Disable small LDO. */ + rtwn_setbits_1(sc, R92C_SPS0_CTRL, 0x1, 0); + + /* Enable WL suspend. */ + rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_PCIE, + R92C_APS_FSMCO_AFSM_HSUS, 1); + + /* Enable SW LPS. 
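 */

/*
 * The RTWN_CHK() wrapper defined at the top of r92e_power_on() is the
 * classic do { ... } while (0) macro shape.  A minimal demonstration
 * of why that shape matters; the macro here returns -1 instead of EIO
 * and is otherwise illustrative.
 */

#include <stdio.h>

#define CHK(res) do {			\
	if ((res) != 0)			\
		return (-1);		\
} while (0)

static int
setup(int a, int b)
{
	/*
	 * Because do/while(0) makes the expansion a single statement,
	 * the macro composes safely with an unbraced if/else.
	 */
	if (a)
		CHK(b);
	else
		printf("skipped\n");
	return (0);
}

int
main(void)
{
	printf("%d\n", setup(1, 0));	/* 0 */
	printf("%d\n", setup(1, 1));	/* -1 */
	return (0);
}

/* Finally, enable SW LPS: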
*/ + rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, + R92C_APS_FSMCO_APFM_RSM, 1); +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_init.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_led.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_led.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_led.c (revision 312720) @@ -0,0 +1,69 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include +#include + +void +r92e_set_led(struct rtwn_softc *sc, int led, int on) +{ + + if (led == RTWN_LED_LINK) { + if (!on) + rtwn_setbits_1(sc, R92C_LEDCFG1, 0, R92C_LEDCFG1_DIS); + else + rtwn_setbits_1(sc, R92C_LEDCFG1, R92C_LEDCFG1_DIS, 0); + sc->ledlink = on; /* Save LED state. */ + } +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_led.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_priv.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_priv.h (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_priv.h (revision 312720) @@ -0,0 +1,262 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef R92E_PRIV_H +#define R92E_PRIV_H + +/* + * MAC initialization values. + */ +static const struct rtwn_mac_prog rtl8192eu_mac[] = { + { 0x011, 0xeb }, { 0x012, 0x07 }, { 0x014, 0x75 }, { 0x303, 0xa7 }, + { 0x428, 0x0a }, { 0x429, 0x10 }, { 0x430, 0x00 }, { 0x431, 0x00 }, + { 0x432, 0x00 }, { 0x433, 0x01 }, { 0x434, 0x04 }, { 0x435, 0x05 }, + { 0x436, 0x07 }, { 0x437, 0x08 }, { 0x43c, 0x04 }, { 0x43d, 0x05 }, + { 0x43e, 0x07 }, { 0x43f, 0x08 }, { 0x440, 0x5d }, { 0x441, 0x01 }, + { 0x442, 0x00 }, { 0x444, 0x10 }, { 0x445, 0x00 }, { 0x446, 0x00 }, + { 0x447, 0x00 }, { 0x448, 0x00 }, { 0x449, 0xf0 }, { 0x44a, 0x0f }, + { 0x44b, 0x3e }, { 0x44c, 0x10 }, { 0x44d, 0x00 }, { 0x44e, 0x00 }, + { 0x44f, 0x00 }, { 0x450, 0x00 }, { 0x451, 0xf0 }, { 0x452, 0x0f }, + { 0x453, 0x00 }, { 0x456, 0x5e }, { 0x460, 0x66 }, { 0x461, 0x66 }, + { 0x4c8, 0xff }, { 0x4c9, 0x08 }, { 0x4cc, 0xff }, { 0x4cd, 0xff }, + { 0x4ce, 0x01 }, { 0x500, 0x26 }, { 0x501, 0xa2 }, { 0x502, 0x2f }, + { 0x503, 0x00 }, { 0x504, 0x28 }, { 0x505, 0xa3 }, { 0x506, 0x5e }, + { 0x507, 0x00 }, { 0x508, 0x2b }, { 0x509, 0xa4 }, { 0x50a, 0x5e }, + { 0x50b, 0x00 }, { 0x50c, 0x4f }, { 0x50d, 0xa4 }, { 0x50e, 0x00 }, + { 0x50f, 0x00 }, { 0x512, 0x1c }, { 0x514, 0x0a }, { 0x516, 0x0a }, + { 0x525, 0x4f }, { 0x540, 0x12 }, { 0x541, 0x64 }, { 0x550, 0x10 }, + { 0x551, 0x10 }, { 0x559, 0x02 }, { 0x55c, 0x50 }, { 0x55d, 0xff }, + { 0x605, 0x30 }, { 0x608, 0x0e }, { 0x609, 0x2a }, { 0x620, 0xff }, + { 0x621, 0xff }, { 0x622, 0xff }, { 0x623, 0xff }, { 0x624, 0xff }, + { 0x625, 0xff }, { 0x626, 0xff }, { 0x627, 0xff }, { 0x638, 0x50 }, + { 0x63c, 0x0a }, { 0x63d, 0x0a }, { 0x63e, 0x0e }, { 0x63f, 0x0e }, + { 0x640, 0x40 }, { 0x642, 0x40 }, { 0x643, 0x00 }, { 0x652, 0xc8 }, + { 0x66e, 0x05 }, { 0x700, 0x21 }, { 0x701, 0x43 }, { 0x702, 0x65 }, + { 0x703, 0x87 }, { 0x708, 0x21 }, { 0x709, 0x43 }, { 0x70a, 0x65 }, + { 0x70b, 0x87 } +}; + + +/* + * Baseband initialization values. 
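 */

/*
 * The rtl8192eu_mac[] table above is a flat { register, value } list.
 * A consumer would presumably apply it with a loop along these lines;
 * the helper names, field widths and byte-wide write are assumptions
 * for illustration, not part of this hunk.
 */

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mac_prog {
	uint16_t reg;
	uint8_t val;
};

static void
reg_write_1(uint16_t reg, uint8_t val)
{
	printf("write 0x%03x <- 0x%02x\n", reg, val);	/* stand-in */
}

static void
apply_mac_prog(const struct mac_prog *prog, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)	/* one write per entry, in order */
		reg_write_1(prog[i].reg, prog[i].val);
}

int
main(void)
{
	static const struct mac_prog demo[] = {
		{ 0x011, 0xeb }, { 0x012, 0x07 },	/* first entries above */
	};

	apply_mac_prog(demo, sizeof(demo) / sizeof(demo[0]));
	return (0);
}

/*
 * Baseband initialization values: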
+ */ +static const uint16_t rtl8192eu_bb_regs[] = { + 0x800, 0x804, 0x808, 0x80c, 0x810, 0x814, 0x818, 0x81c, 0x820, + 0x824, 0x828, 0x82c, 0x830, 0x834, 0x838, 0x83c, 0x840, 0x844, + 0x848, 0x84c, 0x850, 0x854, 0x858, 0x85c, 0x860, 0x864, 0x868, + 0x86c, 0x870, 0x874, 0x878, 0x87c, 0x880, 0x884, 0x888, 0x88c, + 0x890, 0x894, 0x898, 0x900, 0x904, 0x908, 0x90c, 0x910, 0x914, + 0x918, 0x91c, 0x924, 0x928, 0x92c, 0x930, 0x934, 0x938, 0x93c, + 0x940, 0x944, 0x94c, 0xa00, 0xa04, 0xa08, 0xa0c, 0xa10, 0xa14, + 0xa18, 0xa1c, 0xa20, 0xa24, 0xa28, 0xa2c, 0xa70, 0xa74, 0xa78, + 0xa7c, 0xa80, 0xb38, 0xc00, 0xc04, 0xc08, 0xc0c, 0xc10, 0xc14, + 0xc18, 0xc1c, 0xc20, 0xc24, 0xc28, 0xc2c, 0xc30, 0xc34, 0xc38, + 0xc3c, 0xc40, 0xc44, 0xc48, 0xc4c, 0xc50, 0xc54, 0xc58, 0xc5c, + 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74, 0xc78, 0xc7c, 0xc80, + 0xc84, 0xc88, 0xc8c, 0xc90, 0xc94, 0xc98, 0xc9c, 0xca0, 0xca4, + 0xca8, 0xcac, 0xcb0, 0xcb4, 0xcb8, 0xcbc, 0xcc0, 0xcc4, 0xcc8, + 0xccc, 0xcd0, 0xcd4, 0xcd8, 0xcdc, 0xce0, 0xce4, 0xce8, 0xcec, + 0xd00, 0xd04, 0xd08, 0xd0c, 0xd10, 0xd14, 0xd18, 0xd1c, 0xd2c, + 0xd30, 0xd34, 0xd38, 0xd3c, 0xd40, 0xd44, 0xd48, 0xd4c, 0xd50, + 0xd54, 0xd58, 0xd5c, 0xd60, 0xd64, 0xd68, 0xd6c, 0xd70, 0xd74, + 0xd78, 0xd80, 0xd84, 0xd88, 0xe00, 0xe04, 0xe08, 0xe10, 0xe14, + 0xe18, 0xe1c, 0xe28, 0xe30, 0xe34, 0xe38, 0xe3c, 0xe40, 0xe44, + 0xe48, 0xe4c, 0xe50, 0xe54, 0xe58, 0xe5c, 0xe60, 0xe68, 0xe6c, + 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, 0xed0, + 0xed4, 0xed8, 0xedc, 0xee0, 0xeec, 0xee4, 0xee8, 0xf14, 0xf4c, + 0xf00 +}; + +static const uint32_t rtl8192eu_bb_vals[] = { + 0x80040000, 0x00000003, 0x0000fc00, 0x0000000a, 0x10001331, + 0x020c3d10, 0x02220385, 0x00000000, 0x01000100, 0x00390204, + 0x01000100, 0x00390204, 0x32323232, 0x30303030, 0x30303030, + 0x30303030, 0x00010000, 0x00010000, 0x28282828, 0x28282828, + 0x00000000, 0x00000000, 0x009a009a, 0x01000014, 0x66f60000, + 0x061f0000, 0x30303030, 0x30303030, 0x00000000, 0x55004200, + 0x08080808, 0x00000000, 0xb0000c1c, 0x00000001, 0x00000000, + 0xcc0000c0, 0x00000800, 0xfffffffe, 0x40302010, 0x00000000, + 0x00000023, 0x00000000, 0x81121313, 0x806c0001, 0x00000001, + 0x00000000, 0x00010000, 0x00000001, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000008, 0x00d0c7c8, 0x81ff000c, 0x8c838300, + 0x2e68120f, 0x95009b78, 0x1114d028, 0x00881117, 0x89140f00, + 0x1a1b0000, 0x090e1317, 0x00000204, 0x00d30000, 0x101fff00, + 0x00000007, 0x00000900, 0x225b0606, 0x218075b1, 0x00000000, + 0x48071d40, 0x03a05633, 0x000000e4, 0x6c6c6c6c, 0x08800000, + 0x40000100, 0x08800000, 0x40000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x69e9ac47, 0x469652af, 0x49795994, + 0x0a97971c, 0x1f7c403f, 0x000100b7, 0xec020107, 0x007f037f, + 0x00340020, 0x0080801f, 0x00000020, 0x00248492, 0x00000000, + 0x7112848b, 0x47c00bff, 0x00000036, 0x00000600, 0x02013169, + 0x0000001f, 0x00b91612, 0x40000100, 0x21f60000, 0x40000100, + 0xa0e40000, 0x00121820, 0x00000000, 0x00121820, 0x00007f7f, + 0x00000000, 0x000300a0, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x28000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x64b22427, + 0x00766932, 0x00222222, 0x00040000, 0x77644302, 0x2f97d40c, + 0x00080740, 0x00020403, 0x0000907f, 0x20010201, 0xa0633333, + 0x3333bc43, 0x7a8f5b6b, 0x0000007f, 0xcc979975, 0x00000000, + 0x80608000, 0x00000000, 0x00127353, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x6437140a, 0x00000000, 0x00000282, + 0x30032064, 
0x4653de68, 0x04518a3c, 0x00002101, 0x2a201c16, + 0x1812362e, 0x322c2220, 0x000e3c24, 0x01081008, 0x00000800, + 0xf0b50000, 0x30303030, 0x30303030, 0x03903030, 0x30303030, + 0x30303030, 0x30303030, 0x30303030, 0x00000000, 0x1000dc1f, + 0x10008c1f, 0x02140102, 0x681604c2, 0x01007c00, 0x01004800, + 0xfb000000, 0x000028d1, 0x1000dc1f, 0x10008c1f, 0x02140102, + 0x28160d05, 0x00000008, 0x0fc05656, 0x03c09696, 0x03c09696, + 0x0c005656, 0x0c005656, 0x0c005656, 0x0c005656, 0x03c09696, + 0x0c005656, 0x03c09696, 0x03c09696, 0x03c09696, 0x03c09696, + 0x0000d6d6, 0x0000d6d6, 0x0fc01616, 0xb0000c1c, 0x00000001, + 0x00000003, 0x00000000, 0x00000300 +}; + +static const struct rtwn_bb_prog rtl8192eu_bb[] = { + { + nitems(rtl8192eu_bb_regs), + rtl8192eu_bb_regs, + rtl8192eu_bb_vals, + { 0 }, + NULL + } +}; + + +static const uint32_t rtl8192eu_agc_vals[] = { + 0xfb000001, 0xfb010001, 0xfb020001, 0xfb030001, 0xfb040001, + 0xfb050001, 0xfa060001, 0xf9070001, 0xf8080001, 0xf7090001, + 0xf60a0001, 0xf50b0001, 0xf40c0001, 0xf30d0001, 0xf20e0001, + 0xf10f0001, 0xf0100001, 0xef110001, 0xee120001, 0xed130001, + 0xec140001, 0xeb150001, 0xea160001, 0xe9170001, 0xe8180001, + 0xe7190001, 0xc81a0001, 0xc71b0001, 0xc61c0001, 0x071d0001, + 0x061e0001, 0x051f0001, 0x04200001, 0x03210001, 0xaa220001, + 0xa9230001, 0xa8240001, 0xa7250001, 0xa6260001, 0x85270001, + 0x84280001, 0x83290001, 0x252a0001, 0x242b0001, 0x232c0001, + 0x222d0001, 0x672e0001, 0x662f0001, 0x65300001, 0x64310001, + 0x63320001, 0x62330001, 0x61340001, 0x45350001, 0x44360001, + 0x43370001, 0x42380001, 0x41390001, 0x403a0001, 0x403b0001, + 0x403c0001, 0x403d0001, 0x403e0001, 0x403f0001, 0xfb400001, + 0xfb410001, 0xfb420001, 0xfb430001, 0xfb440001, 0xfb450001, + 0xfa460001, 0xf9470001, 0xf8480001, 0xf7490001, 0xf64a0001, + 0xf54b0001, 0xf44c0001, 0xf34d0001, 0xf24e0001, 0xf14f0001, + 0xf0500001, 0xef510001, 0xee520001, 0xed530001, 0xec540001, + 0xeb550001, 0xea560001, 0xe9570001, 0xe8580001, 0xe7590001, + 0xe65a0001, 0xe55b0001, 0xe45c0001, 0xe35d0001, 0xe25e0001, + 0xe15f0001, 0x8a600001, 0x89610001, 0x88620001, 0x87630001, + 0x86640001, 0x85650001, 0x84660001, 0x83670001, 0x82680001, + 0x6b690001, 0x6a6a0001, 0x696b0001, 0x686c0001, 0x676d0001, + 0x666e0001, 0x656f0001, 0x64700001, 0x63710001, 0x62720001, + 0x61730001, 0x49740001, 0x48750001, 0x47760001, 0x46770001, + 0x45780001, 0x44790001, 0x437a0001, 0x427b0001, 0x417c0001, + 0x407d0001, 0x407e0001, 0x407f0001 +}; + +static const struct rtwn_agc_prog rtl8192eu_agc[] = { + { + nitems(rtl8192eu_agc_vals), + rtl8192eu_agc_vals, + { 0 }, + NULL + } +}; + +/* + * RF initialization values. 
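 */

/*
 * Both rtl8192eu_bb[] and rtl8192eu_agc[] above consist of a single
 * unconditional block: the { 0 } condition and NULL next pointer mean
 * the fallback walk in r92e_init_bb() accepts the first entry and the
 * KASSERTed chain traversal never advances.  A sketch of that
 * selection logic with stub types; the condition encoding here is
 * simplified and illustrative, not the driver's.
 */

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct prog {
	int cond;			/* 0: matches any chip */
	const struct prog *next;	/* alternative if cond rejects */
};

static int
check_condition(int chip, int cond)
{
	return (cond == 0 || cond == chip);
}

static const struct prog *
select_prog(const struct prog *p, int chip)
{
	/* Walk the chain until a block accepts this chip. */
	while (!check_condition(chip, p->cond)) {
		assert(p->next != NULL);	/* cf. the KASSERTs */
		p = p->next;
	}
	return (p);
}

int
main(void)
{
	static const struct prog fallback = { 0, NULL };
	static const struct prog head = { 42, &fallback };

	/* Chip 7 rejects cond 42 and falls through to the generic block. */
	printf("%d\n", select_prog(&head, 7)->cond);	/* 0 */
	return (0);
}

/*
 * RF initialization values: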
+ */ +static const uint8_t rtl8192eu_rf0_regs[] = { + 0x7f, 0x81, 0x00, 0x08, 0x18, 0x19, 0x1b, 0x1e, 0x1f, 0x2f, 0x3f, + 0x42, 0x57, 0x58, 0x67, 0x83, 0xb0, 0xb1, 0xb2, 0xb4, 0xb5, 0xb6, + 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbf, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, + 0xc7, 0xc8, 0xc9, 0xca, 0xdf, 0xef, 0x51, 0x52, 0x53, 0x56, 0x35, + 0x35, 0x35, 0x36, 0x36, 0x36, 0x36, 0x18, 0x5a, 0x19, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x00, 0x84, + 0x86, 0x87, 0x8e, 0x8f, 0xef, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, + 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0xef, + 0x18, 0x1e, 0x1f, 0x00 +}, rtl8192eu_rf1_regs[] = { + 0x7f, 0x81, 0x00, 0x08, 0x18, 0x19, 0x1b, 0x1e, 0x1f, 0x2f, 0x3f, + 0x42, 0x57, 0x58, 0x67, 0x7f, 0x81, 0x83, 0xdf, 0xef, 0x51, 0x52, + 0x53, 0x56, 0x35, 0x35, 0x35, 0x36, 0x36, 0x36, 0x36, 0x18, 0x5a, + 0x19, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x00, 0x84, 0x86, 0x87, 0x8e, 0x8f, 0xef, 0x3b, 0x3b, 0x3b, + 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, + 0x3b, 0x3b, 0xef, 0x00, 0x1e, 0x1f, 0x00 +}; + +static const uint32_t rtl8192eu_rf0_vals[] = { + 0x00082, 0x3fc00, 0x30000, 0x08400, 0x00407, 0x00012, 0x00064, + 0x80009, 0x00880, 0x1a060, 0x00000, 0x060c0, 0xd0000, 0xbe180, + 0x01552, 0x00000, 0xff9f1, 0x55418, 0x8cc00, 0x43083, 0x08166, + 0x0803e, 0x1c69f, 0x0407f, 0x80001, 0x40001, 0x00400, 0xc0000, + 0x02400, 0x00009, 0x40c91, 0x99999, 0x000a3, 0x88820, 0x76c06, + 0x00000, 0x80000, 0x00180, 0x001a0, 0x69545, 0x7e45e, 0x00071, + 0x51ff3, 0x000a8, 0x001e2, 0x002a8, 0x01c24, 0x09c24, 0x11c24, + 0x19c24, 0x00c07, 0x48000, 0x739d0, 0x0add7, 0x09dd4, 0x08dd1, + 0x07dce, 0x06dcb, 0x05dc8, 0x04dc5, 0x034cc, 0x0244f, 0x0144c, + 0x00014, 0x30159, 0x68180, 0x0014e, 0x48e00, 0x65540, 0x88000, + 0x020a0, 0xf02b0, 0xef7b0, 0xd4fb0, 0xcf060, 0xb0090, 0xa0080, + 0x90080, 0x8f780, 0x78730, 0x60fb0, 0x5ffa0, 0x40620, 0x37090, + 0x20080, 0x1f060, 0x0ffb0, 0x000a0, 0x0fc07, 0x00001, 0x80000, + 0x33e70 +}, rtl8192eu_rf1_vals[] = { + 0x00082, 0x3fc00, 0x30000, 0x08400, 0x00407, 0x00012, 0x00064, + 0x80009, 0x00880, 0x1a060, 0x00000, 0x060c0, 0xd0000, 0xbe180, + 0x01552, 0x00082, 0x3f000, 0x00000, 0x00180, 0x001a0, 0x69545, + 0x7e42e, 0x00071, 0x51ff3, 0x000a8, 0x001e0, 0x002a8, 0x01ca8, + 0x09c24, 0x11c24, 0x19c24, 0x00c07, 0x48000, 0x739d0, 0x0add7, + 0x09dd4, 0x08dd1, 0x07dce, 0x06dcb, 0x05dc8, 0x04dc5, 0x034cc, + 0x0244f, 0x0144c, 0x00014, 0x30159, 0x68180, 0x000ce, 0x48a00, + 0x65540, 0x88000, 0x020a0, 0xf02b0, 0xef7b0, 0xd4fb0, 0xcf060, + 0xb0090, 0xa0080, 0x90080, 0x8f780, 0x78730, 0x60fb0, 0x5ffa0, + 0x40620, 0x37090, 0x20080, 0x1f060, 0x0ffb0, 0x000a0, 0x10159, + 0x00001, 0x80000, 0x33e70 +}; + +static const struct rtwn_rf_prog rtl8192eu_rf[] = { + /* RF chain 0. */ + { + nitems(rtl8192eu_rf0_regs), + rtl8192eu_rf0_regs, + rtl8192eu_rf0_vals, + { 0 }, + NULL + }, + { 0, NULL, NULL, { 0 }, NULL }, + /* RF chain 1. 
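 */

/*
 * Layout note: rtl8192eu_rf[] holds one program plus one zero-filled
 * terminator entry per RF chain.  r92e_init_rf() earlier in this
 * commit does "i += r92c_init_rf_chain(sc, &sc->rf_prog[i], chain)"
 * inside a loop that also increments i, which only lines up if the
 * helper returns the number of entries the chain occupied and the
 * loop's own i++ steps over the terminator.  A self-consistent sketch
 * of that consumption pattern; the names and return convention are
 * presumed, not confirmed by this hunk.
 */

#include <stddef.h>
#include <stdio.h>

struct rf_prog {
	size_t count;		/* 0 marks a terminator entry */
};

/* Pretend-programs one chain; returns how many prog entries it used. */
static size_t
init_rf_chain(const struct rf_prog *prog, int chain)
{
	printf("chain %d: %zu registers\n", chain, prog->count);
	return (1);		/* a single unconditional block */
}

static void
init_all_chains(const struct rf_prog *prog, int nchains)
{
	size_t i;
	int chain;

	/* The per-iteration i++ steps over each chain's terminator. */
	for (chain = 0, i = 0; chain < nchains; chain++, i++)
		i += init_rf_chain(&prog[i], chain);
}

int
main(void)
{
	static const struct rf_prog progs[] = {
		{ 92 }, { 0 },		/* chain 0 + terminator */
		{ 72 }, { 0 },		/* chain 1 + terminator */
	};

	init_all_chains(progs, 2);
	return (0);
}

/* RF chain 1: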
*/ + { + nitems(rtl8192eu_rf1_regs), + rtl8192eu_rf1_regs, + rtl8192eu_rf1_vals, + { 0 }, + NULL + }, + { 0, NULL, NULL, { 0 }, NULL } +}; + +#endif /* R92E_PRIV_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_priv.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_reg.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_reg.h (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_reg.h (revision 312720) @@ -0,0 +1,47 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef R92E_REG_H +#define R92E_REG_H + +#include +#include + +/* + * MAC registers. + */ +/* System Configuration. */ +#define R92E_LDOV12_CTRL 0x014 +#define R92E_AFE_XTAL_CTRL 0x02c +#define R92E_APE_PLL_CTRL_EXT 0x078 + +/* Bits for R92E_AFE_XTAL_CTRL. */ +#define R92E_AFE_XTAL_CTRL_ADDR_M 0x00fff000 +#define R92E_AFE_XTAL_CTRL_ADDR_S 12 + +#endif /* R92E_REG_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_reg.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rf.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rf.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rf.c (revision 312720) @@ -0,0 +1,88 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +uint32_t +r92e_rf_read(struct rtwn_softc *sc, int chain, uint8_t addr) +{ + uint32_t val; + + val = rtwn_bb_read(sc, R92C_HSSI_PARAM2(chain)); + rtwn_bb_write(sc, R92C_HSSI_PARAM2(chain), + RW(val, R92C_HSSI_PARAM2_READ_ADDR, addr) & + ~R92C_HSSI_PARAM2_READ_EDGE); + + rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(0), R92C_HSSI_PARAM2_READ_EDGE, 0); + rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(0), 0, R92C_HSSI_PARAM2_READ_EDGE); + rtwn_delay(sc, 20); + + if (rtwn_bb_read(sc, R92C_HSSI_PARAM1(chain)) & R92C_HSSI_PARAM1_PI) + val = rtwn_bb_read(sc, R92C_HSPI_READBACK(chain)); + else + val = rtwn_bb_read(sc, R92C_LSSI_READBACK(chain)); + return (MS(val, R92C_LSSI_READBACK_DATA)); +} + +void +r92e_rf_write(struct rtwn_softc *sc, int chain, uint8_t addr, uint32_t val) +{ + + rtwn_bb_setbits(sc, 0x818, 0x20000, 0); + rtwn_bb_write(sc, R92C_LSSI_PARAM(chain), + SM(R88E_LSSI_PARAM_ADDR, addr) | SM(R92C_LSSI_PARAM_DATA, val)); + rtwn_bb_setbits(sc, 0x818, 0, 0x20000); +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rf.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom.c (revision 312720) @@ -0,0 +1,144 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include +#include + +void +r92e_parse_rom(struct rtwn_softc *sc, uint8_t *buf) +{ + struct r92e_softc *rs = sc->sc_priv; + struct r92e_rom *rom = (struct r92e_rom *)buf; + uint8_t pwr_diff; + int i, j, k; + + sc->thermal_meter = rom->thermal_meter; + rs->crystalcap = RTWN_GET_ROM_VAR(rom->crystalcap, + R92E_ROM_CRYSTALCAP_DEF); + + for (i = 0; i < sc->ntxchains; i++) { + struct r92e_tx_pwr_2g *pwr_2g = &rom->tx_pwr[i].pwr_2g; + struct r92e_tx_pwr_diff_2g *pwr_diff_2g = + &rom->tx_pwr[i].pwr_diff_2g; + + for (j = 0; j < R92E_GROUP_2G - 1; j++) { + rs->cck_tx_pwr[i][j] = + RTWN_GET_ROM_VAR(pwr_2g->cck[j], + R92E_DEF_TX_PWR_2G); + rs->ht40_tx_pwr_2g[i][j] = + RTWN_GET_ROM_VAR(pwr_2g->ht40[j], + R92E_DEF_TX_PWR_2G); + } + rs->cck_tx_pwr[i][j] = RTWN_GET_ROM_VAR(pwr_2g->cck[j], + R92E_DEF_TX_PWR_2G); + + rs->cck_tx_pwr_diff_2g[i][0] = 0; + rs->ofdm_tx_pwr_diff_2g[i][0] = RTWN_SIGN4TO8( + MS(pwr_diff_2g->ht20_ofdm, LOW_PART)); + rs->bw20_tx_pwr_diff_2g[i][0] = RTWN_SIGN4TO8( + MS(pwr_diff_2g->ht20_ofdm, HIGH_PART)); + rs->bw40_tx_pwr_diff_2g[i][0] = 0; + pwr_diff = RTWN_GET_ROM_VAR(pwr_diff_2g->ht20_ofdm, + R92E_DEF_TX_PWR_HT20_DIFF); + if (pwr_diff != R92E_DEF_TX_PWR_HT20_DIFF) { + rs->ofdm_tx_pwr_diff_2g[i][0] = RTWN_SIGN4TO8( + MS(pwr_diff_2g->ht20_ofdm, LOW_PART)); + rs->bw20_tx_pwr_diff_2g[i][0] = RTWN_SIGN4TO8( + MS(pwr_diff_2g->ht20_ofdm, HIGH_PART)); + } else { + rs->ofdm_tx_pwr_diff_2g[i][0] = + rs->bw20_tx_pwr_diff_2g[i][0] = pwr_diff; + } + + for (j = 1, k = 0; k < nitems(pwr_diff_2g->diff123); j++, k++) { + pwr_diff = RTWN_GET_ROM_VAR( + pwr_diff_2g->diff123[k].ofdm_cck, + R92E_DEF_TX_PWR_DIFF); + if (pwr_diff != R92E_DEF_TX_PWR_DIFF) { + rs->cck_tx_pwr_diff_2g[i][j] = RTWN_SIGN4TO8( + MS(pwr_diff_2g->diff123[k].ofdm_cck, + LOW_PART)); + rs->ofdm_tx_pwr_diff_2g[i][j] = RTWN_SIGN4TO8( + MS(pwr_diff_2g->diff123[k].ofdm_cck, + HIGH_PART)); + } else { + rs->cck_tx_pwr_diff_2g[i][j] = + rs->ofdm_tx_pwr_diff_2g[i][j] = pwr_diff; + } + pwr_diff = RTWN_GET_ROM_VAR( + pwr_diff_2g->diff123[k].ht40_ht20, + R92E_DEF_TX_PWR_DIFF); + if (pwr_diff != R92E_DEF_TX_PWR_DIFF) { + rs->bw20_tx_pwr_diff_2g[i][j] = RTWN_SIGN4TO8( + MS(pwr_diff_2g->diff123[k].ht40_ht20, + LOW_PART)); + rs->bw40_tx_pwr_diff_2g[i][j] = RTWN_SIGN4TO8( + 
MS(pwr_diff_2g->diff123[k].ht40_ht20, + HIGH_PART)); + } else { + rs->bw20_tx_pwr_diff_2g[i][j] = + rs->bw40_tx_pwr_diff_2g[i][j] = pwr_diff; + } + } + } + + rs->regulatory = MS(rom->rf_board_opt, R92C_ROM_RF1_REGULATORY); + + /* Read MAC address. */ + IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, rom->macaddr); +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_defs.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_defs.h (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_defs.h (revision 312720) @@ -0,0 +1,42 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef R92E_ROM_DEFS_H +#define R92E_ROM_DEFS_H + +#include + +#define R92E_GROUP_2G 6 + +#define R92E_MAX_TX_COUNT 4 +#define R92E_MAX_RF_PATH 4 + +#define R92E_EFUSE_MAX_LEN 512 +#define R92E_EFUSE_MAP_LEN 512 + +#endif /* R92E_ROM_DEFS_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_defs.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_image.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_image.h (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_image.h (revision 312720) @@ -0,0 +1,87 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * $FreeBSD$ + */ + +#ifndef R92E_ROM_IMAGE_H +#define R92E_ROM_IMAGE_H + +#include + +#define R92E_DEF_TX_PWR_2G 0x2d +#define R92E_DEF_TX_PWR_HT20_DIFF 0x02 +#define R92E_DEF_TX_PWR_DIFF 0xfe + +struct r92e_tx_pwr_2g { + uint8_t cck[R92E_GROUP_2G]; + uint8_t ht40[R92E_GROUP_2G - 1]; +} __packed; + +struct r92e_tx_pwr_diff_2g { + uint8_t ht20_ofdm; + struct { + uint8_t ht40_ht20; + uint8_t ofdm_cck; + } __packed diff123[R92E_MAX_TX_COUNT - 1]; +} __packed; + +struct r92e_tx_pwr { + struct r92e_tx_pwr_2g pwr_2g; + struct r92e_tx_pwr_diff_2g pwr_diff_2g; + uint8_t reserved[24]; +} __packed; + +/* + * RTL8192EU ROM image. 
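 */

/*
 * The struct below is __packed and pinned to R92E_EFUSE_MAP_LEN (512,
 * from r92e_rom_defs.h above) by a _Static_assert, so the efuse map
 * can be overlaid onto a raw byte buffer with no padding surprises;
 * the declared fields do sum to 512 bytes
 * (16 + 4*42 + 19 + 5 + 4 + 3 + 6 + 2 + 7 + 282).  The same guard in
 * miniature, with a made-up 16-byte map:
 */

#include <stdint.h>

struct tiny_rom {
	uint8_t id;
	uint8_t mac[6];
	uint8_t pad[9];
} __attribute__((__packed__));	/* spelled __packed in the kernel */

/* Compile-time check: the layout must cover the map exactly. */
_Static_assert(sizeof(struct tiny_rom) == 16,
    "tiny_rom must match the (hypothetical) 16-byte map");

/*
 * RTL8192EU ROM image layout: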
+ */ +struct r92e_rom { + uint8_t reserved1[16]; + struct r92e_tx_pwr tx_pwr[R92E_MAX_RF_PATH]; + uint8_t channel_plan; + uint8_t crystalcap; +#define R92E_ROM_CRYSTALCAP_DEF 0x20 + + uint8_t thermal_meter; + uint8_t iqk_lck; + uint8_t pa_type; + uint8_t lna_type_2g; + uint8_t reserved2; + uint8_t lna_type_5g; + uint8_t reserved3; + uint8_t rf_board_opt; + uint8_t rf_feature_opt; + uint8_t rf_bt_opt; + uint8_t version; + uint8_t customer_id; + uint8_t tx_bbswing_2g; + uint8_t tx_bbswing_5g; + uint8_t tx_pwr_calib_rate; + uint8_t rf_ant_opt; + uint8_t rfe_option; + uint8_t reserved4[5]; + uint16_t vid; + uint16_t pid; + uint8_t reserved5[3]; + uint8_t macaddr[IEEE80211_ADDR_LEN]; + uint8_t reserved6[2]; + uint8_t string[7]; /* "Realtek" */ + uint8_t reserved7[282]; +} __packed; + +_Static_assert(sizeof(struct r92e_rom) == R92E_EFUSE_MAP_LEN, + "R92E_EFUSE_MAP_LEN must be equal to sizeof(struct r92e_rom)!"); + +#endif /* R92E_ROM_IMAGE_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rom_image.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rx.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rx.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rx.c (revision 312720) @@ -0,0 +1,97 @@ +/*- + * Copyright (c) 2010 Damien Bergamini + * Copyright (c) 2014, 2017 Kevin Lo + * Copyright (c) 2015-2016 Andriy Voskoboinyk + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#ifndef RTWN_WITHOUT_UCODE +void +r92e_handle_c2h_report(struct rtwn_softc *sc, uint8_t *buf, int len) +{ + + /* Skip Rx descriptor. */ + buf += sizeof(struct r92c_rx_stat); + len -= sizeof(struct r92c_rx_stat); + + if (len < 2) { + device_printf(sc->sc_dev, "C2H report too short (len %d)\n", + len); + return; + } + len -= 2; + + switch (buf[0]) { /* command id */ + case R12A_C2H_TX_REPORT: + /* NOTREACHED */ + KASSERT(0, ("use handle_tx_report() instead of %s\n", + __func__)); + break; + } +} +#else +void +r92e_handle_c2h_report(struct rtwn_softc *sc, uint8_t *buf, int len) +{ + + /* Should not happen. 
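 */

/*
 * r92e_handle_c2h_report() has two bodies selected by the
 * RTWN_WITHOUT_UCODE option: the real parser above, which first skips
 * the Rx descriptor and refuses buffers shorter than the two C2H
 * header bytes (the command id plus one more byte), and this stub for
 * firmware-less builds.  The bounds check, reduced to essentials; the
 * descriptor length here is a placeholder, not the real
 * sizeof(struct r92c_rx_stat).
 */

#include <stdint.h>
#include <stdio.h>

#define RX_DESC_LEN	24	/* placeholder value */

/* Returns the C2H payload length, or -1 if the buffer is truncated. */
static int
c2h_payload_len(int len)
{
	len -= RX_DESC_LEN;	/* skip the Rx descriptor */
	if (len < 2)		/* need the 2-byte C2H header */
		return (-1);
	return (len - 2);
}

int
main(void)
{
	printf("%d\n", c2h_payload_len(32));	/* 6 */
	printf("%d\n", c2h_payload_len(10));	/* -1 */
	return (0);
}

/* This stub should never be reached: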
*/ + device_printf(sc->sc_dev, "%s: called\n", __func__); +} +#endif + +int8_t +r92e_get_rssi_cck(struct rtwn_softc *sc, void *physt) +{ + + return (10 + r88e_get_rssi_cck(sc, physt)); +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_rx.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_var.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_var.h (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_var.h (revision 312720) @@ -0,0 +1,54 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef R92E_VAR_H +#define R92E_VAR_H + +#include + +struct r92e_softc { + uint8_t chip; + uint8_t rs_flags; + + uint8_t regulatory; + uint8_t crystalcap; + + int8_t cck_tx_pwr[R92E_MAX_RF_PATH][R92E_GROUP_2G]; + int8_t ht40_tx_pwr_2g[R92E_MAX_RF_PATH][R92E_GROUP_2G]; + + int8_t cck_tx_pwr_diff_2g[R92E_MAX_RF_PATH][R92E_MAX_TX_COUNT]; + int8_t ofdm_tx_pwr_diff_2g[R92E_MAX_RF_PATH][R92E_MAX_TX_COUNT]; + int8_t bw20_tx_pwr_diff_2g[R92E_MAX_RF_PATH][R92E_MAX_TX_COUNT]; + int8_t bw40_tx_pwr_diff_2g[R92E_MAX_RF_PATH][R92E_MAX_TX_COUNT]; + + int ac_usb_dma_size; + int ac_usb_dma_time; + uint32_t rf_chnlbw[R92E_MAX_RF_PATH]; +}; + +#endif /* R92E_VAR_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/r92e_var.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu.h (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu.h (revision 312720) @@ -0,0 +1,43 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + + +#ifndef RTL8192EU_H +#define RTL8192EU_H + +#include +#include + +/* + * Function declarations. 
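 */

/*
 * struct r92e_softc (r92e_var.h above) is chip-private state hung off
 * sc->sc_priv; r92eu_attach_private() and r92e_detach_private() in
 * r92eu_attach.c below allocate and release it.  The lifecycle in
 * isolation, with calloc() standing in for the kernel's
 * malloc(..., M_RTWN_PRIV, M_WAITOK | M_ZERO); types and names are
 * illustrative.
 */

#include <stdlib.h>

struct softc {
	void *sc_priv;		/* opaque pointer to chip-private state */
};

struct priv {			/* stands in for struct r92e_softc */
	int ac_usb_dma_size;
	int ac_usb_dma_time;
};

static int
attach_private(struct softc *sc)
{
	struct priv *p;

	p = calloc(1, sizeof(*p));
	if (p == NULL)		/* M_WAITOK in the kernel cannot fail */
		return (-1);
	p->ac_usb_dma_size = 0x06;
	p->ac_usb_dma_time = 0x20;
	sc->sc_priv = p;
	return (0);
}

static void
detach_private(struct softc *sc)
{
	free(sc->sc_priv);
	sc->sc_priv = NULL;
}

int
main(void)
{
	struct softc sc = { NULL };

	if (attach_private(&sc) == 0)
		detach_private(&sc);
	return (0);
}

/*
 * Function declarations: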
+ */ +/* r92eu_init.c */ +void r92eu_init_rx_agg(struct rtwn_softc *); +void r92eu_post_init(struct rtwn_softc *); + +#endif /* RTL8192EU_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c (revision 312720) @@ -0,0 +1,202 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include + +#include + +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +void r92eu_attach(struct rtwn_usb_softc *); + +static void +r92eu_attach_private(struct rtwn_softc *sc) +{ + struct r92e_softc *rs; + + rs = malloc(sizeof(struct r92e_softc), M_RTWN_PRIV, M_WAITOK | M_ZERO); + + rs->ac_usb_dma_size = 0x06; + rs->ac_usb_dma_time = 0x20; + + sc->sc_priv = rs; +} + +void +r92e_detach_private(struct rtwn_softc *sc) +{ + struct r92e_softc *rs = sc->sc_priv; + + free(rs, M_RTWN_PRIV); +} + +static void +r92eu_adj_devcaps(struct rtwn_softc *sc) +{ + /* XXX TODO? */ +} + +void +r92eu_attach(struct rtwn_usb_softc *uc) +{ + struct rtwn_softc *sc = &uc->uc_sc; + + /* USB part. */ + uc->uc_align_rx = r12au_align_rx; + uc->tx_agg_desc_num = 3; + + /* Common part. 
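 */

/*
 * What follows is the chip method table: r92eu_attach() points each
 * sc_* hook at an RTL8192E-specific routine, a routine shared with
 * the 8188E/8812A/8821A code, or one of the rtwn_nop_* placeholders
 * for hooks this chip does not need.  The dispatch idea in miniature,
 * with illustrative names:
 */

#include <stdio.h>

struct dev {
	int dummy;
};

struct dev_ops {
	int  (*power_on)(struct dev *);
	void (*set_led)(struct dev *, int, int);
};

static int
chip_power_on(struct dev *d)
{
	(void)d;		/* chip-specific register pokes go here */
	return (0);
}

/* Placeholder for hooks a chip does not need (cf. rtwn_nop_*). */
static void
nop_set_led(struct dev *d, int led, int on)
{
	(void)d; (void)led; (void)on;
}

static const struct dev_ops chip_ops = { chip_power_on, nop_set_led };

/* Generic code dispatches through the table, never on chip type. */
static int
dev_start(struct dev *d, const struct dev_ops *ops)
{
	int error;

	error = ops->power_on(d);
	if (error == 0)
		ops->set_led(d, 0, 1);
	return (error);
}

int
main(void)
{
	struct dev d = { 0 };

	printf("%d\n", dev_start(&d, &chip_ops));	/* 0 */
	return (0);
}

/* Common part: fill in the chip method table.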
*/ + sc->sc_flags = RTWN_FLAG_EXT_HDR; + + sc->sc_set_chan = r92e_set_chan; + sc->sc_fill_tx_desc = r12a_fill_tx_desc; + sc->sc_fill_tx_desc_raw = r12a_fill_tx_desc_raw; + sc->sc_fill_tx_desc_null = r12a_fill_tx_desc_null; + sc->sc_dump_tx_desc = r12au_dump_tx_desc; + sc->sc_tx_radiotap_flags = r12a_tx_radiotap_flags; + sc->sc_rx_radiotap_flags = r12a_rx_radiotap_flags; + sc->sc_get_rx_stats = r12a_get_rx_stats; + sc->sc_get_rssi_cck = r92e_get_rssi_cck; + sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm; + sc->sc_classify_intr = r12au_classify_intr; + sc->sc_handle_tx_report = r12a_ratectl_tx_complete; + sc->sc_handle_c2h_report = r92e_handle_c2h_report; + sc->sc_check_frame = rtwn_nop_int_softc_mbuf; + sc->sc_rf_read = r92e_rf_read; + sc->sc_rf_write = r92e_rf_write; + sc->sc_check_condition = r92c_check_condition; + sc->sc_efuse_postread = rtwn_nop_softc; + sc->sc_parse_rom = r92e_parse_rom; + sc->sc_set_led = r92e_set_led; + sc->sc_power_on = r92e_power_on; + sc->sc_power_off = r92e_power_off; +#ifndef RTWN_WITHOUT_UCODE + sc->sc_fw_reset = r92e_fw_reset; + sc->sc_fw_download_enable = r12a_fw_download_enable; +#endif + sc->sc_llt_init = r92e_llt_init; + sc->sc_set_page_size = rtwn_nop_int_softc; + sc->sc_lc_calib = r92c_lc_calib; + sc->sc_iq_calib = r88e_iq_calib; /* XXX TODO */ + sc->sc_read_chipid_vendor = rtwn_nop_softc_uint32; + sc->sc_adj_devcaps = r92eu_adj_devcaps; + sc->sc_vap_preattach = rtwn_nop_softc_vap; + sc->sc_postattach = rtwn_nop_softc; + sc->sc_detach_private = r92e_detach_private; + sc->sc_set_media_status = r92e_set_media_status; +#ifndef RTWN_WITHOUT_UCODE + sc->sc_set_rsvd_page = r88e_set_rsvd_page; + sc->sc_set_pwrmode = r92e_set_pwrmode; + sc->sc_set_rssi = rtwn_nop_softc; /* XXX TODO? */ +#endif + sc->sc_beacon_init = r12a_beacon_init; + sc->sc_beacon_enable = r92c_beacon_enable; + sc->sc_beacon_set_rate = rtwn_nop_void_int; + sc->sc_beacon_select = r21a_beacon_select; + sc->sc_temp_measure = r88e_temp_measure; + sc->sc_temp_read = r88e_temp_read; + sc->sc_init_tx_agg = r21au_init_tx_agg; + sc->sc_init_rx_agg = r92eu_init_rx_agg; + sc->sc_init_ampdu = rtwn_nop_softc; + sc->sc_init_intr = r12a_init_intr; + sc->sc_init_edca = r92c_init_edca; + sc->sc_init_bb = r92e_init_bb; + sc->sc_init_rf = r92e_init_rf; + sc->sc_init_antsel = rtwn_nop_softc; + sc->sc_post_init = r92eu_post_init; + sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc; + + sc->mac_prog = &rtl8192eu_mac[0]; + sc->mac_size = nitems(rtl8192eu_mac); + sc->bb_prog = &rtl8192eu_bb[0]; + sc->bb_size = nitems(rtl8192eu_bb); + sc->agc_prog = &rtl8192eu_agc[0]; + sc->agc_size = nitems(rtl8192eu_agc); + sc->rf_prog = &rtl8192eu_rf[0]; + + sc->name = "RTL8192EU"; + sc->fwname = "rtwn-rtl8192eufw"; + sc->fwsig = 0x92e; + + sc->page_count = R92E_TX_PAGE_COUNT; + sc->pktbuf_count = 0; /* Unused */ + sc->ackto = 0x40; + sc->npubqpages = R92E_PUBQ_NPAGES; + sc->page_size = R92E_TX_PAGE_SIZE; + sc->txdesc_len = sizeof(struct r12au_tx_desc); + sc->efuse_maxlen = R92E_EFUSE_MAX_LEN; + sc->efuse_maplen = R92E_EFUSE_MAP_LEN; + sc->rx_dma_size = R92E_RX_DMA_BUFFER_SIZE; + + sc->macid_limit = R12A_MACID_MAX + 1; + sc->cam_entry_limit = R12A_CAM_ENTRY_COUNT; + sc->fwsize_limit = R92E_MAX_FW_SIZE; + sc->temp_delta = R88E_CALIB_THRESHOLD; + + sc->bcn_status_reg[0] = R92C_TDECTRL; + sc->bcn_status_reg[1] = R21A_DWBCN1_CTRL; + sc->rcr = 0; + + sc->ntxchains = 2; + sc->nrxchains = 2; + + r92eu_attach_private(sc); +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c 
___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_init.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_init.c (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_init.c (revision 312720) @@ -0,0 +1,96 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_wlan.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include + +void +r92eu_init_rx_agg(struct rtwn_softc *sc) +{ + struct r92e_softc *rs = sc->sc_priv; + + /* Rx aggregation (USB). */ + rtwn_setbits_1(sc, R12A_RXDMA_PRO, 0x20, 0x1e); + rtwn_write_4(sc, R92C_RXDMA_AGG_PG_TH, + rs->ac_usb_dma_size | (rs->ac_usb_dma_time << 8)); + rtwn_setbits_1(sc, R92C_TRXDMA_CTRL, 0, + R92C_TRXDMA_CTRL_RXDMA_AGG_EN); +} + +void +r92eu_post_init(struct rtwn_softc *sc) +{ + + /* Setup RTS BW (equal to data BW). */ + rtwn_setbits_1(sc, R92C_QUEUE_CTRL, 0x08, 0); + + /* Reset USB mode switch setting. 
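 */

/*
 * In r92eu_init_rx_agg() above, the aggregation threshold register
 * takes the page count in its low byte and the flush timeout in the
 * byte above it: rs->ac_usb_dma_size | (rs->ac_usb_dma_time << 8).
 * With the attach-time defaults from r92eu_attach.c (0x06 and 0x20):
 */

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t dma_size = 0x06;	/* pages per aggregate */
	uint32_t dma_time = 0x20;	/* flush timeout */

	/* Same packing as the R92C_RXDMA_AGG_PG_TH write above. */
	uint32_t th = dma_size | (dma_time << 8);

	printf("0x%08x\n", (unsigned)th);	/* 0x00002006 */
	return (0);
}

/* Reset the USB mode switch setting: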
*/ + rtwn_write_1(sc, R92C_ACLK_MON, 0); + + rtwn_write_1(sc, R92C_USB_HRPWM, 0); + +#ifndef RTWN_WITHOUT_UCODE + if (sc->sc_flags & RTWN_FW_LOADED) { + if (sc->sc_ratectl_sysctl == RTWN_RATECTL_FW) { + /* TODO: implement */ + sc->sc_ratectl = RTWN_RATECTL_NET80211; + } else + sc->sc_ratectl = sc->sc_ratectl_sysctl; + } else +#endif + sc->sc_ratectl = RTWN_RATECTL_NONE; +} Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_init.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_reg.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_reg.h (nonexistent) +++ projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_reg.h (revision 312720) @@ -0,0 +1,36 @@ +/*- + * Copyright (c) 2017 Kevin Lo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef R92EU_REG_H +#define R92EU_REG_H + +#include +#include +#include + +#endif /* R92EU_REG_H */ Property changes on: projects/clang400-import/sys/dev/rtwn/rtl8192e/usb/r92eu_reg.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_beacon.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_beacon.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_beacon.c (revision 312720) @@ -1,94 +1,96 @@ /*- * Copyright (c) 2016 Andriy Voskoboinyk * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Index: projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_beacon.c
===================================================================
--- projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_beacon.c	(revision 312719)
+++ projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_beacon.c	(revision 312720)
@@ -1,94 +1,96 @@
 /*-
  * Copyright (c) 2016 Andriy Voskoboinyk
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_wlan.h"
 
 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
 
 void
 r12a_beacon_init(struct rtwn_softc *sc, void *buf, int id)
 {
 	struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
 
 	txd->flags0 = R12A_FLAGS0_LSG | R12A_FLAGS0_FSG | R12A_FLAGS0_BMCAST;
 
 	/*
 	 * NB: there is no need to setup HWSEQ_EN bit;
 	 * QSEL_BEACON already implies it.
 	 */
 	txd->txdw1 = htole32(SM(R12A_TXDW1_QSEL, R12A_TXDW1_QSEL_BEACON));
 	txd->txdw1 |= htole32(SM(R12A_TXDW1_MACID, RTWN_MACID_BC));
 	txd->txdw3 = htole32(R12A_TXDW3_DRVRATE);
 	txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, id));
+	txd->txdw4 = htole32(SM(R12A_TXDW4_DATARATE, RTWN_RIDX_CCK1));
+	txd->txdw6 = htole32(SM(R21A_TXDW6_MBSSID, id));
 }
 
 void
 r12a_beacon_set_rate(void *buf, int is5ghz)
 {
 	struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
 
 	txd->txdw4 &= ~htole32(R12A_TXDW4_DATARATE_M);
 
 	if (is5ghz) {
 		txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE, RTWN_RIDX_OFDM6));
 	} else
 		txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE, RTWN_RIDX_CCK1));
 }
Index: projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_fw.c
===================================================================
--- projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_fw.c	(revision 312719)
+++ projects/clang400-import/sys/dev/rtwn/rtl8812a/r12a_fw.c	(revision 312720)
@@ -1,194 +1,194 @@
 /*-
  * Copyright (c) 2016 Andriy Voskoboinyk
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_wlan.h"
 
 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
 
 #ifndef RTWN_WITHOUT_UCODE
 void
 r12a_fw_reset(struct rtwn_softc *sc, int reason)
 {
 	/* Reset MCU IO wrapper. */
-	rtwn_setbits_1(sc, R92C_RSV_CTRL, 0x02, 0);
+	rtwn_setbits_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00, 0);
 	rtwn_setbits_1(sc, R92C_RSV_CTRL + 1, 0x08, 0);
 
 	rtwn_setbits_1_shift(sc, R92C_SYS_FUNC_EN,
 	    R92C_SYS_FUNC_EN_CPUEN, 0, 1);
 
 	/* Enable MCU IO wrapper. */
-	rtwn_setbits_1(sc, R92C_RSV_CTRL, 0x02, 0);
+	rtwn_setbits_1(sc, R92C_RSV_CTRL, R92C_RSV_CTRL_WLOCK_00, 0);
 	rtwn_setbits_1(sc, R92C_RSV_CTRL + 1, 0, 0x08);
 
 	rtwn_setbits_1_shift(sc, R92C_SYS_FUNC_EN,
 	    0, R92C_SYS_FUNC_EN_CPUEN, 1);
 }
 
 void
 r12a_fw_download_enable(struct rtwn_softc *sc, int enable)
 {
 	if (enable) {
 		/* MCU firmware download enable. */
 		rtwn_setbits_1(sc, R92C_MCUFWDL, 0, R92C_MCUFWDL_EN);
 
 		/* 8051 reset. */
 		rtwn_setbits_1_shift(sc, R92C_MCUFWDL, R92C_MCUFWDL_ROM_DLEN,
 		    0, 2);
 	} else {
 		/* MCU download disable. */
 		rtwn_setbits_1(sc, R92C_MCUFWDL, R92C_MCUFWDL_EN, 0);
 	}
 }
 
 void
 r12a_set_media_status(struct rtwn_softc *sc, int macid)
 {
 	struct r12a_fw_cmd_msrrpt status;
 	int error;
 
 	if (macid & RTWN_MACID_VALID)
 		status.msrb0 = R12A_MSRRPT_B0_ASSOC;
 	else
 		status.msrb0 = R12A_MSRRPT_B0_DISASSOC;
 
 	status.macid = (macid & ~RTWN_MACID_VALID);
 	status.macid_end = 0;
 
 	error = r88e_fw_cmd(sc, R12A_CMD_MSR_RPT, &status, sizeof(status));
 	if (error != 0)
 		device_printf(sc->sc_dev, "cannot change media status!\n");
 }
 
 int
 r12a_set_pwrmode(struct rtwn_softc *sc, struct ieee80211vap *vap, int off)
 {
 	struct r12a_fw_cmd_pwrmode mode;
 	int error;
 
 	if (off && vap->iv_state == IEEE80211_S_RUN &&
 	    (vap->iv_flags & IEEE80211_F_PMGTON)) {
 		mode.mode = R88E_PWRMODE_LEG;
 		/*
 		 * TODO: switch to RFOFF state
 		 * (something is missing here - Rx stops with it).
 		 */
 #ifdef RTWN_TODO
 		mode.pwr_state = R88E_PWRMODE_STATE_RFOFF;
 #else
 		mode.pwr_state = R88E_PWRMODE_STATE_RFON;
 #endif
 	} else {
 		mode.mode = R88E_PWRMODE_CAM;
 		mode.pwr_state = R88E_PWRMODE_STATE_ALLON;
 	}
 	mode.pwrb1 =
 	    SM(R88E_PWRMODE_B1_SMART_PS, R88E_PWRMODE_B1_LEG_NULLDATA) |
 	    SM(R88E_PWRMODE_B1_RLBM, R88E_PWRMODE_B1_MODE_MIN);
 	/* XXX ignored */
 	mode.bcn_pass = 0;
 	mode.queue_uapsd = 0;
 	mode.pwrb5 = R12A_PWRMODE_B5_NO_BTCOEX;
 	error = r88e_fw_cmd(sc, R12A_CMD_SET_PWRMODE, &mode, sizeof(mode));
 	if (error != 0) {
 		device_printf(sc->sc_dev,
 		    "%s: CMD_SET_PWRMODE was not sent, error %d\n",
 		    __func__, error);
 	}
 
 	return (error);
 }
 
 void
 r12a_iq_calib_fw(struct rtwn_softc *sc)
 {
 	struct r12a_softc *rs = sc->sc_priv;
 	struct ieee80211_channel *c = sc->sc_ic.ic_curchan;
 	struct r12a_fw_cmd_iq_calib cmd;
 
 	if (rs->rs_flags & R12A_IQK_RUNNING)
 		return;
 
 	RTWN_DPRINTF(sc, RTWN_DEBUG_CALIB, "Starting IQ calibration (FW)\n");
 
 	cmd.chan = rtwn_chan2centieee(c);
 	if (IEEE80211_IS_CHAN_5GHZ(c))
 		cmd.band_bw = RTWN_CMD_IQ_BAND_5GHZ;
 	else
 		cmd.band_bw = RTWN_CMD_IQ_BAND_2GHZ;
 
 	/* TODO: 80/160 MHz. */
 	if (IEEE80211_IS_CHAN_HT40(c))
 		cmd.band_bw |= RTWN_CMD_IQ_CHAN_WIDTH_40;
 	else
 		cmd.band_bw |= RTWN_CMD_IQ_CHAN_WIDTH_20;
 
 	cmd.ext_5g_pa_lna = RTWN_CMD_IQ_EXT_PA_5G(rs->ext_pa_5g);
 	cmd.ext_5g_pa_lna |= RTWN_CMD_IQ_EXT_LNA_5G(rs->ext_lna_5g);
 
 	if (r88e_fw_cmd(sc, R12A_CMD_IQ_CALIBRATE, &cmd, sizeof(cmd)) != 0) {
 		RTWN_DPRINTF(sc, RTWN_DEBUG_CALIB,
 		    "error while sending IQ calibration command to FW!\n");
 		return;
 	}
 
 	rs->rs_flags |= R12A_IQK_RUNNING;
 }
 #endif
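Every routine in r12a_fw.c above follows the same host-to-firmware command pattern: fill a small fixed-layout structure, then hand it to r88e_fw_cmd(), which wraps it into a host-to-card (H2C) mailbox write and returns an errno-style result. A condensed sketch of that pattern, under a purely illustrative command id and payload (R12A_CMD_EXAMPLE and struct fw_cmd_example are hypothetical stand-ins; the real ids and layouts live in the r12a firmware headers):

	/*
	 * Sketch of the fw-command pattern used above; the command id and
	 * payload layout here are invented for illustration only.
	 */
	struct fw_cmd_example {
		uint8_t	mode;
		uint8_t	arg;
	} __packed;

	static int
	send_example_fw_cmd(struct rtwn_softc *sc)
	{
		struct fw_cmd_example cmd;
		int error;

		cmd.mode = 1;	/* fill the fixed-layout payload... */
		cmd.arg = 0;
		/* ...and let r88e_fw_cmd() queue it to the H2C mailbox. */
		error = r88e_fw_cmd(sc, R12A_CMD_EXAMPLE, &cmd, sizeof(cmd));
		if (error != 0)
			device_printf(sc->sc_dev, "example command not sent\n");
		return (error);
	}

This is why r12a_set_media_status() and r12a_set_pwrmode() only differ in the structure they fill: the transport underneath is shared.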
Index: projects/clang400-import/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c
===================================================================
--- projects/clang400-import/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c	(revision 312719)
+++ projects/clang400-import/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c	(revision 312720)
@@ -1,297 +1,298 @@
 /*-
  * Copyright (c) 2016 Andriy Voskoboinyk
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_wlan.h"
 
 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
 
 void	r12au_attach(struct rtwn_usb_softc *);
 
 static void
 r12au_postattach(struct rtwn_softc *sc)
 {
 	struct rtwn_usb_softc *uc = RTWN_USB_SOFTC(sc);
 	struct r12a_softc *rs = sc->sc_priv;
 
 	if (usbd_get_speed(uc->uc_udev) == USB_SPEED_SUPER) {
 		rs->ac_usb_dma_size = 0x07;
 		rs->ac_usb_dma_time = 0x1a;
 	} else {
 		rs->ac_usb_dma_size = 0x01;
 		rs->ac_usb_dma_time = 0x10;
 	}
 
 	if (rs->chip & R12A_CHIP_C_CUT)
 		sc->sc_rf_read = r12a_c_cut_rf_read;
 	else
 		sc->sc_rf_read = r12a_rf_read;
 
 	if (rs->board_type == R92C_BOARD_TYPE_MINICARD ||
 	    rs->board_type == R92C_BOARD_TYPE_SOLO ||
 	    rs->board_type == R92C_BOARD_TYPE_COMBO)
 		sc->sc_set_led = r88e_set_led;
 	else
 		sc->sc_set_led = r12a_set_led;
 
 	if (!(rs->ext_pa_2g || rs->ext_lna_2g ||
 	    rs->ext_pa_5g || rs->ext_lna_5g))
 		sc->mac_prog = &rtl8812au_mac_no_ext_pa_lna[0];
 
 	sc->sc_ic.ic_ioctl = r12a_ioctl_net;
 }
 
 void
 r12a_vap_preattach(struct rtwn_softc *sc, struct ieee80211vap *vap)
 {
 	struct r12a_softc *rs = sc->sc_priv;
 	struct ifnet *ifp = vap->iv_ifp;
 
 	ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
 	RTWN_LOCK(sc);
 	if (rs->rs_flags & R12A_RXCKSUM_EN)
 		ifp->if_capenable |= IFCAP_RXCSUM;
 	if (rs->rs_flags & R12A_RXCKSUM6_EN)
 		ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
 	RTWN_UNLOCK(sc);
 }
 
 static void
 r12a_attach_private(struct rtwn_softc *sc)
 {
 	struct r12a_softc *rs;
 
 	rs = malloc(sizeof(struct r12a_softc), M_RTWN_PRIV,
 	    M_WAITOK | M_ZERO);
 
 	rs->rs_flags = R12A_RXCKSUM_EN | R12A_RXCKSUM6_EN;
 
 	rs->rs_fix_spur = r12a_fix_spur;
 	rs->rs_set_band_2ghz = r12a_set_band_2ghz;
 	rs->rs_set_band_5ghz = r12a_set_band_5ghz;
 	rs->rs_init_burstlen = r12au_init_burstlen;
 	rs->rs_init_ampdu_fwhw = r12au_init_ampdu_fwhw;
 	rs->rs_crystalcap_write = r12a_crystalcap_write;
 #ifndef RTWN_WITHOUT_UCODE
 	rs->rs_iq_calib_fw_supported = r12a_iq_calib_fw_supported;
 #endif
 	rs->rs_iq_calib_sw = r12a_iq_calib_sw;
 
 	rs->ampdu_max_time = 0x70;
 
 	sc->sc_priv = rs;
 }
 
 void
 r12a_detach_private(struct rtwn_softc *sc)
 {
 	struct r12a_softc *rs = sc->sc_priv;
 
 	free(rs, M_RTWN_PRIV);
 }
 
 static void
 r12a_read_chipid_vendor(struct rtwn_softc *sc, uint32_t reg_sys_cfg)
 {
 	struct r12a_softc *rs = sc->sc_priv;
 
 	if (MS(reg_sys_cfg, R92C_SYS_CFG_CHIP_VER_RTL) == 1)
 		rs->chip |= R12A_CHIP_C_CUT;
 }
 
 static void
 r12au_adj_devcaps(struct rtwn_softc *sc)
 {
 	struct r12a_softc *rs = sc->sc_priv;
 	struct ieee80211com *ic = &sc->sc_ic;
 
 	if (rs->chip & R12A_CHIP_C_CUT) {
 		ic->ic_htcaps |= IEEE80211_HTCAP_LDPC |
 		    IEEE80211_HTC_TXLDPC;
 	}
 
 	/* TODO: STBC, VHT etc */
 }
 
 void
 r12au_attach(struct rtwn_usb_softc *uc)
 {
 	struct rtwn_softc *sc = &uc->uc_sc;
 
 	/* USB part. */
 	uc->uc_align_rx = r12au_align_rx;
 	uc->tx_agg_desc_num = 1;
 
 	/* Common part.
*/ sc->sc_flags = RTWN_FLAG_EXT_HDR; sc->sc_set_chan = r12a_set_chan; sc->sc_fill_tx_desc = r12a_fill_tx_desc; sc->sc_fill_tx_desc_raw = r12a_fill_tx_desc_raw; sc->sc_fill_tx_desc_null = r12a_fill_tx_desc_null; sc->sc_dump_tx_desc = r12au_dump_tx_desc; sc->sc_tx_radiotap_flags = r12a_tx_radiotap_flags; sc->sc_rx_radiotap_flags = r12a_rx_radiotap_flags; sc->sc_get_rx_stats = r12a_get_rx_stats; sc->sc_get_rssi_cck = r88e_get_rssi_cck; sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm; sc->sc_classify_intr = r12au_classify_intr; sc->sc_handle_tx_report = r12a_ratectl_tx_complete; sc->sc_handle_c2h_report = r12a_handle_c2h_report; sc->sc_check_frame = r12a_check_frame_checksum; sc->sc_rf_write = r12a_rf_write; sc->sc_check_condition = r12a_check_condition; sc->sc_efuse_postread = rtwn_nop_softc; sc->sc_parse_rom = r12a_parse_rom; sc->sc_power_on = r12a_power_on; sc->sc_power_off = r12a_power_off; #ifndef RTWN_WITHOUT_UCODE sc->sc_fw_reset = r12a_fw_reset; sc->sc_fw_download_enable = r12a_fw_download_enable; #endif + sc->sc_llt_init = r92c_llt_init; sc->sc_set_page_size = r12a_set_page_size; sc->sc_lc_calib = r12a_lc_calib; sc->sc_iq_calib = r12a_iq_calib; sc->sc_read_chipid_vendor = r12a_read_chipid_vendor; sc->sc_adj_devcaps = r12au_adj_devcaps; sc->sc_vap_preattach = r12a_vap_preattach; sc->sc_postattach = r12au_postattach; sc->sc_detach_private = r12a_detach_private; #ifndef RTWN_WITHOUT_UCODE sc->sc_set_media_status = r12a_set_media_status; sc->sc_set_rsvd_page = r88e_set_rsvd_page; sc->sc_set_pwrmode = r12a_set_pwrmode; sc->sc_set_rssi = rtwn_nop_softc; /* XXX TODO */ #else sc->sc_set_media_status = rtwn_nop_softc_int; #endif sc->sc_beacon_init = r12a_beacon_init; sc->sc_beacon_enable = r92c_beacon_enable; sc->sc_beacon_set_rate = r12a_beacon_set_rate; sc->sc_beacon_select = rtwn_nop_softc_int; sc->sc_temp_measure = r88e_temp_measure; sc->sc_temp_read = r88e_temp_read; sc->sc_init_tx_agg = r92cu_init_tx_agg; sc->sc_init_rx_agg = r12au_init_rx_agg; sc->sc_init_ampdu = r12au_init_ampdu; sc->sc_init_intr = r12a_init_intr; sc->sc_init_edca = r12a_init_edca; sc->sc_init_bb = r12a_init_bb; sc->sc_init_rf = r12a_init_rf; sc->sc_init_antsel = r12a_init_antsel; sc->sc_post_init = r12au_post_init; sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc; sc->chan_list_5ghz[0] = r12a_chan_5ghz_0; sc->chan_list_5ghz[1] = r12a_chan_5ghz_1; sc->chan_list_5ghz[2] = r12a_chan_5ghz_2; sc->chan_num_5ghz[0] = nitems(r12a_chan_5ghz_0); sc->chan_num_5ghz[1] = nitems(r12a_chan_5ghz_1); sc->chan_num_5ghz[2] = nitems(r12a_chan_5ghz_2); sc->mac_prog = &rtl8812au_mac[0]; sc->mac_size = nitems(rtl8812au_mac); sc->bb_prog = &rtl8812au_bb[0]; sc->bb_size = nitems(rtl8812au_bb); sc->agc_prog = &rtl8812au_agc[0]; sc->agc_size = nitems(rtl8812au_agc); sc->rf_prog = &rtl8812au_rf[0]; sc->name = "RTL8812AU"; sc->fwname = "rtwn-rtl8812aufw"; sc->fwsig = 0x950; sc->page_count = R12A_TX_PAGE_COUNT; sc->pktbuf_count = R12A_TXPKTBUF_COUNT; sc->ackto = 0x80; sc->npubqpages = R12A_PUBQ_NPAGES; sc->page_size = R12A_TX_PAGE_SIZE; sc->txdesc_len = sizeof(struct r12au_tx_desc); sc->efuse_maxlen = R12A_EFUSE_MAX_LEN; sc->efuse_maplen = R12A_EFUSE_MAP_LEN; sc->rx_dma_size = R12A_RX_DMA_BUFFER_SIZE; sc->macid_limit = R12A_MACID_MAX + 1; sc->cam_entry_limit = R12A_CAM_ENTRY_COUNT; sc->fwsize_limit = R12A_MAX_FW_SIZE; sc->temp_delta = R88E_CALIB_THRESHOLD; sc->bcn_status_reg[0] = R92C_TDECTRL; sc->bcn_status_reg[1] = R92C_TDECTRL; sc->rcr = R12A_RCR_DIS_CHK_14 | R12A_RCR_VHT_ACK | R12A_RCR_TCP_OFFLD_EN; sc->ntxchains = 2; sc->nrxchains = 2; 
r12a_attach_private(sc); } Index: projects/clang400-import/sys/dev/rtwn/rtl8821a/r21a_init.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8821a/r21a_init.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8821a/r21a_init.c (revision 312720) @@ -1,358 +1,358 @@ /*- * Copyright (c) 2016 Andriy Voskoboinyk * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include int r21a_power_on(struct rtwn_softc *sc) { #define RTWN_CHK(res) do { \ if (res != 0) \ return (EIO); \ } while(0) int ntries; /* Clear suspend and power down bits.*/ RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_APDM_HPDN, 0, 1)); /* Disable GPIO9 as EXT WAKEUP. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_GPIO_INTM + 2, 0x01, 0)); /* Enable WL suspend. */ RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE, 0, 1)); /* Enable LDOA12 MACRO block for all interfaces. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_LDOA15_CTRL, 0, R92C_LDOA15_CTRL_EN)); /* Disable BT_GPS_SEL pins. */ RTWN_CHK(rtwn_setbits_1(sc, 0x067, 0x10, 0)); /* 1 ms delay. */ rtwn_delay(sc, 1000); /* Release analog Ips to digital isolation. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_SYS_ISO_CTRL, R92C_SYS_ISO_CTRL_IP2MAC, 0)); /* Disable SW LPS and WL suspend. */ RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_APFM_RSM | R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE, 0, 1)); /* Wait for power ready bit. */ for (ntries = 0; ntries < 5000; ntries++) { if (rtwn_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST) break; rtwn_delay(sc, 10); } if (ntries == 5000) { device_printf(sc->sc_dev, "timeout waiting for chip power up\n"); return (ETIMEDOUT); } /* Release WLON reset. */ RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, R92C_APS_FSMCO_RDY_MACON, 2)); /* Disable HWPDN. */ RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_APDM_HPDN, 0, 1)); /* Disable WL suspend. 
*/ RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE, 0, 1)); RTWN_CHK(rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, R92C_APS_FSMCO_APFM_ONMAC, 1)); for (ntries = 0; ntries < 5000; ntries++) { if (!(rtwn_read_2(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_APFM_ONMAC)) break; rtwn_delay(sc, 10); } if (ntries == 5000) return (ETIMEDOUT); /* Switch DPDT_SEL_P output from WL BB. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_LEDCFG3, 0, 0x01)); /* switch for PAPE_G/PAPE_A from WL BB; switch LNAON from WL BB. */ RTWN_CHK(rtwn_setbits_1(sc, 0x067, 0, 0x30)); RTWN_CHK(rtwn_setbits_1(sc, 0x025, 0x40, 0)); /* Enable falling edge triggering interrupt. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_GPIO_INTM + 1, 0, 0x02)); /* Enable GPIO9 interrupt mode. */ RTWN_CHK(rtwn_setbits_1(sc, 0x063, 0, 0x02)); /* Enable GPIO9 input mode. */ RTWN_CHK(rtwn_setbits_1(sc, 0x062, 0x02, 0)); /* Enable HSISR GPIO interrupt. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_HSIMR, 0, 0x01)); /* Enable HSISR GPIO9 interrupt. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_HSIMR + 2, 0, 0x02)); /* XTAL trim. */ RTWN_CHK(rtwn_setbits_1(sc, R92C_APE_PLL_CTRL_EXT + 2, 0xFF, 0x82)); RTWN_CHK(rtwn_setbits_1(sc, R92C_AFE_MISC, 0, 0x40)); /* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */ RTWN_CHK(rtwn_write_2(sc, R92C_CR, 0x0000)); RTWN_CHK(rtwn_setbits_2(sc, R92C_CR, 0, R92C_CR_HCI_TXDMA_EN | R92C_CR_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN | ((sc->sc_hwcrypto != RTWN_CRYPTO_SW) ? R92C_CR_ENSEC : 0) | R92C_CR_CALTMR_EN)); if (rtwn_read_4(sc, R92C_SYS_CFG) & R92C_SYS_CFG_TRP_BT_EN) - RTWN_CHK(rtwn_setbits_1(sc, 0x07C, 0, 0x40)); + RTWN_CHK(rtwn_setbits_1(sc, R92C_LDO_SWR_CTRL, 0, 0x40)); return (0); #undef RTWN_CHK } void r21a_power_off(struct rtwn_softc *sc) { struct r12a_softc *rs = sc->sc_priv; int error, ntries; /* Stop Rx. */ error = rtwn_write_1(sc, R92C_CR, 0); if (error == ENXIO) /* hardware gone */ return; /* Move card to Low Power state. */ /* Block all Tx queues. */ rtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); for (ntries = 0; ntries < 10; ntries++) { /* Should be zero if no packet is transmitting. */ if (rtwn_read_4(sc, R88E_SCH_TXCMD) == 0) break; rtwn_delay(sc, 5000); } if (ntries == 10) { device_printf(sc->sc_dev, "%s: failed to block Tx queues\n", __func__); return; } /* CCK and OFDM are disabled, and clock are gated. */ rtwn_setbits_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_BBRSTB, 0); rtwn_delay(sc, 1); /* Reset whole BB. */ rtwn_setbits_1(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_BB_GLB_RST, 0); /* Reset MAC TRX. */ rtwn_write_1(sc, R92C_CR, R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN); /* check if removed later. (?) */ rtwn_setbits_1_shift(sc, R92C_CR, R92C_CR_ENSEC, 0, 1); /* Respond TxOK to scheduler */ rtwn_setbits_1(sc, R92C_DUAL_TSF_RST, 0, R92C_DUAL_TSF_RST_TXOK); /* If firmware in ram code, do reset. */ #ifndef RTWN_WITHOUT_UCODE if (rtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL) r21a_fw_reset(sc, RTWN_FW_RESET_SHUTDOWN); #endif /* Reset MCU. */ rtwn_setbits_1_shift(sc, R92C_SYS_FUNC_EN, R92C_SYS_FUNC_EN_CPUEN, 0, 1); rtwn_write_1(sc, R92C_MCUFWDL, 0); /* Move card to Disabled state. */ /* Turn off RF. */ rtwn_write_1(sc, R92C_RF_CTRL, 0); rtwn_setbits_1(sc, R92C_LEDCFG3, 0x01, 0); /* Enable rising edge triggering interrupt. */ rtwn_setbits_1(sc, R92C_GPIO_INTM + 1, 0x02, 0); /* Release WLON reset. 
*/ rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, R92C_APS_FSMCO_RDY_MACON, 2); /* Turn off MAC by HW state machine */ rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0, R92C_APS_FSMCO_APFM_OFF, 1); for (ntries = 0; ntries < 10; ntries++) { /* Wait until it will be disabled. */ if ((rtwn_read_2(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_APFM_OFF) == 0) break; rtwn_delay(sc, 5000); } if (ntries == 10) { device_printf(sc->sc_dev, "%s: could not turn off MAC\n", __func__); return; } /* Analog Ips to digital isolation. */ rtwn_setbits_1(sc, R92C_SYS_ISO_CTRL, 0, R92C_SYS_ISO_CTRL_IP2MAC); /* Disable LDOA12 MACRO block. */ rtwn_setbits_1(sc, R92C_LDOA15_CTRL, R92C_LDOA15_CTRL_EN, 0); /* Enable WL suspend. */ rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_PCIE, R92C_APS_FSMCO_AFSM_HSUS, 1); /* Enable GPIO9 as EXT WAKEUP. */ rtwn_setbits_1(sc, R92C_GPIO_INTM + 2, 0, 0x01); rs->rs_flags &= ~(R12A_IQK_RUNNING | R12A_RADAR_ENABLED); } int r21a_check_condition(struct rtwn_softc *sc, const uint8_t cond[]) { struct r12a_softc *rs = sc->sc_priv; uint8_t mask; int i; RTWN_DPRINTF(sc, RTWN_DEBUG_RESET, "%s: condition byte 0: %02X; ext 5ghz pa/lna %d/%d\n", __func__, cond[0], rs->ext_pa_5g, rs->ext_lna_5g); if (cond[0] == 0) return (1); mask = 0; if (rs->ext_pa_5g) mask |= R21A_COND_EXT_PA_5G; if (rs->ext_lna_5g) mask |= R21A_COND_EXT_LNA_5G; if (rs->bt_coex) mask |= R21A_COND_BT; if (!rs->ext_pa_2g && !rs->ext_lna_2g && !rs->ext_pa_5g && !rs->ext_lna_5g && !rs->bt_coex) mask = R21A_COND_BOARD_DEF; if (mask == 0) return (0); for (i = 0; i < RTWN_MAX_CONDITIONS && cond[i] != 0; i++) if (cond[i] == mask) return (1); return (0); } void r21a_crystalcap_write(struct rtwn_softc *sc) { struct r12a_softc *rs = sc->sc_priv; uint32_t reg; uint8_t val; val = rs->crystalcap & 0x3f; reg = rtwn_bb_read(sc, R92C_MAC_PHY_CTRL); reg = RW(reg, R21A_MAC_PHY_CRYSTALCAP, val | (val << 6)); rtwn_bb_write(sc, R92C_MAC_PHY_CTRL, reg); } int r21a_init_bcnq1_boundary(struct rtwn_softc *sc) { #define RTWN_CHK(res) do { \ if (res != 0) \ return (EIO); \ } while(0) RTWN_CHK(rtwn_write_1(sc, R88E_TXPKTBUF_BCNQ1_BDNY, R21A_BCNQ0_BOUNDARY)); RTWN_CHK(rtwn_write_1(sc, R21A_DWBCN1_CTRL + 1, R21A_BCNQ0_BOUNDARY)); RTWN_CHK(rtwn_setbits_1_shift(sc, R21A_DWBCN1_CTRL, 0, R21A_DWBCN1_CTRL_SEL_EN, 2)); return (0); #undef RTWN_CHK } void r21a_init_ampdu_fwhw(struct rtwn_softc *sc) { rtwn_write_1(sc, R92C_FWHW_TXQ_CTRL, R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW); rtwn_write_4(sc, R92C_FAST_EDCA_CTRL, 0x03087777); } Index: projects/clang400-import/sys/dev/rtwn/rtl8821a/usb/r21au_attach.c =================================================================== --- projects/clang400-import/sys/dev/rtwn/rtl8821a/usb/r21au_attach.c (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/rtl8821a/usb/r21au_attach.c (revision 312720) @@ -1,283 +1,284 @@ /*- * Copyright (c) 2016 Andriy Voskoboinyk * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void r21au_attach(struct rtwn_usb_softc *); static void r21a_postattach(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct r12a_softc *rs = sc->sc_priv; if (rs->board_type == R92C_BOARD_TYPE_MINICARD || rs->board_type == R92C_BOARD_TYPE_SOLO || rs->board_type == R92C_BOARD_TYPE_COMBO) sc->sc_set_led = r88e_set_led; else sc->sc_set_led = r21a_set_led; TIMEOUT_TASK_INIT(taskqueue_thread, &rs->rs_chan_check, 0, r21au_chan_check, sc); /* RXCKSUM */ ic->ic_ioctl = r12a_ioctl_net; /* DFS */ rs->rs_scan_start = ic->ic_scan_start; ic->ic_scan_start = r21au_scan_start; rs->rs_scan_end = ic->ic_scan_end; ic->ic_scan_end = r21au_scan_end; } static void r21au_vap_preattach(struct rtwn_softc *sc, struct ieee80211vap *vap) { struct rtwn_vap *rvp = RTWN_VAP(vap); struct r12a_softc *rs = sc->sc_priv; r12a_vap_preattach(sc, vap); /* Install DFS newstate handler (non-monitor vaps only). 
*/ if (rvp->id != RTWN_VAP_ID_INVALID) { KASSERT(rvp->id >= 0 && rvp->id <= nitems(rs->rs_newstate), ("%s: wrong vap id %d\n", __func__, rvp->id)); rs->rs_newstate[rvp->id] = vap->iv_newstate; vap->iv_newstate = r21au_newstate; } } static void r21a_attach_private(struct rtwn_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct r12a_softc *rs; rs = malloc(sizeof(struct r12a_softc), M_RTWN_PRIV, M_WAITOK | M_ZERO); rs->rs_flags = R12A_RXCKSUM_EN | R12A_RXCKSUM6_EN; rs->rs_radar = 0; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "radar_detection", CTLFLAG_RDTUN, &rs->rs_radar, rs->rs_radar, "Enable radar detection (untested)"); rs->rs_fix_spur = rtwn_nop_softc_chan; rs->rs_set_band_2ghz = r21a_set_band_2ghz; rs->rs_set_band_5ghz = r21a_set_band_5ghz; rs->rs_init_burstlen = r21au_init_burstlen; rs->rs_init_ampdu_fwhw = r21a_init_ampdu_fwhw; rs->rs_crystalcap_write = r21a_crystalcap_write; #ifndef RTWN_WITHOUT_UCODE rs->rs_iq_calib_fw_supported = r21a_iq_calib_fw_supported; #endif rs->rs_iq_calib_sw = r21a_iq_calib_sw; rs->ampdu_max_time = 0x5e; rs->ac_usb_dma_size = 0x01; rs->ac_usb_dma_time = 0x10; sc->sc_priv = rs; } static void r21au_adj_devcaps(struct rtwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct r12a_softc *rs = sc->sc_priv; ic->ic_htcaps |= IEEE80211_HTC_TXLDPC; if (rs->rs_radar != 0) ic->ic_caps |= IEEE80211_C_DFS; /* TODO: VHT */ } void r21au_attach(struct rtwn_usb_softc *uc) { struct rtwn_softc *sc = &uc->uc_sc; /* USB part. */ uc->uc_align_rx = r12au_align_rx; uc->tx_agg_desc_num = 6; /* Common part. */ sc->sc_flags = RTWN_FLAG_EXT_HDR; sc->sc_set_chan = r12a_set_chan; sc->sc_fill_tx_desc = r12a_fill_tx_desc; sc->sc_fill_tx_desc_raw = r12a_fill_tx_desc_raw; sc->sc_fill_tx_desc_null = r12a_fill_tx_desc_null; sc->sc_dump_tx_desc = r12au_dump_tx_desc; sc->sc_tx_radiotap_flags = r12a_tx_radiotap_flags; sc->sc_rx_radiotap_flags = r12a_rx_radiotap_flags; sc->sc_get_rx_stats = r12a_get_rx_stats; sc->sc_get_rssi_cck = r21a_get_rssi_cck; sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm; sc->sc_classify_intr = r12au_classify_intr; sc->sc_handle_tx_report = r12a_ratectl_tx_complete; sc->sc_handle_c2h_report = r12a_handle_c2h_report; sc->sc_check_frame = r12a_check_frame_checksum; sc->sc_rf_read = r12a_c_cut_rf_read; sc->sc_rf_write = r12a_rf_write; sc->sc_check_condition = r21a_check_condition; sc->sc_efuse_postread = rtwn_nop_softc; sc->sc_parse_rom = r21a_parse_rom; sc->sc_power_on = r21a_power_on; sc->sc_power_off = r21a_power_off; #ifndef RTWN_WITHOUT_UCODE sc->sc_fw_reset = r21a_fw_reset; sc->sc_fw_download_enable = r12a_fw_download_enable; #endif + sc->sc_llt_init = r92c_llt_init; sc->sc_set_page_size = rtwn_nop_int_softc; sc->sc_lc_calib = rtwn_nop_softc; /* XXX not used */ sc->sc_iq_calib = r12a_iq_calib; sc->sc_read_chipid_vendor = rtwn_nop_softc_uint32; sc->sc_adj_devcaps = r21au_adj_devcaps; sc->sc_vap_preattach = r21au_vap_preattach; sc->sc_postattach = r21a_postattach; sc->sc_detach_private = r12a_detach_private; #ifndef RTWN_WITHOUT_UCODE sc->sc_set_media_status = r12a_set_media_status; sc->sc_set_rsvd_page = r88e_set_rsvd_page; sc->sc_set_pwrmode = r12a_set_pwrmode; sc->sc_set_rssi = rtwn_nop_softc; /* XXX TODO */ #else sc->sc_set_media_status = rtwn_nop_softc_int; #endif sc->sc_beacon_init = r21a_beacon_init; sc->sc_beacon_enable = r92c_beacon_enable; sc->sc_beacon_set_rate = r12a_beacon_set_rate; sc->sc_beacon_select = r21a_beacon_select; sc->sc_temp_measure = 
r88e_temp_measure; sc->sc_temp_read = r88e_temp_read; sc->sc_init_tx_agg = r21au_init_tx_agg; sc->sc_init_rx_agg = r12au_init_rx_agg; sc->sc_init_ampdu = r12au_init_ampdu; sc->sc_init_intr = r12a_init_intr; sc->sc_init_edca = r12a_init_edca; sc->sc_init_bb = r12a_init_bb; sc->sc_init_rf = r12a_init_rf; sc->sc_init_antsel = r12a_init_antsel; sc->sc_post_init = r12au_post_init; sc->sc_init_bcnq1_boundary = r21a_init_bcnq1_boundary; sc->chan_list_5ghz[0] = r12a_chan_5ghz_0; sc->chan_list_5ghz[1] = r12a_chan_5ghz_1; sc->chan_list_5ghz[2] = r12a_chan_5ghz_2; sc->chan_num_5ghz[0] = nitems(r12a_chan_5ghz_0); sc->chan_num_5ghz[1] = nitems(r12a_chan_5ghz_1); sc->chan_num_5ghz[2] = nitems(r12a_chan_5ghz_2); sc->mac_prog = &rtl8821au_mac[0]; sc->mac_size = nitems(rtl8821au_mac); sc->bb_prog = &rtl8821au_bb[0]; sc->bb_size = nitems(rtl8821au_bb); sc->agc_prog = &rtl8821au_agc[0]; sc->agc_size = nitems(rtl8821au_agc); sc->rf_prog = &rtl8821au_rf[0]; sc->name = "RTL8821AU"; sc->fwname = "rtwn-rtl8821aufw"; sc->fwsig = 0x210; sc->page_count = R21A_TX_PAGE_COUNT; sc->pktbuf_count = R12A_TXPKTBUF_COUNT; sc->ackto = 0x80; sc->npubqpages = R12A_PUBQ_NPAGES; sc->page_size = R21A_TX_PAGE_SIZE; sc->txdesc_len = sizeof(struct r12au_tx_desc); sc->efuse_maxlen = R12A_EFUSE_MAX_LEN; sc->efuse_maplen = R12A_EFUSE_MAP_LEN; sc->rx_dma_size = R12A_RX_DMA_BUFFER_SIZE; sc->macid_limit = R12A_MACID_MAX + 1; sc->cam_entry_limit = R12A_CAM_ENTRY_COUNT; sc->fwsize_limit = R12A_MAX_FW_SIZE; sc->temp_delta = R88E_CALIB_THRESHOLD; sc->bcn_status_reg[0] = R92C_TDECTRL; sc->bcn_status_reg[1] = R21A_DWBCN1_CTRL; sc->rcr = R12A_RCR_DIS_CHK_14 | R12A_RCR_VHT_ACK | R12A_RCR_TCP_OFFLD_EN; sc->ntxchains = 1; sc->nrxchains = 1; r21a_attach_private(sc); } Index: projects/clang400-import/sys/dev/rtwn/usb/rtwn_usb_attach.h =================================================================== --- projects/clang400-import/sys/dev/rtwn/usb/rtwn_usb_attach.h (revision 312719) +++ projects/clang400-import/sys/dev/rtwn/usb/rtwn_usb_attach.h (revision 312720) @@ -1,160 +1,171 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2014 Kevin Lo * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 *
 * $FreeBSD$
 */

 void	r92cu_attach(struct rtwn_usb_softc *);
+void	r92eu_attach(struct rtwn_usb_softc *);
 void	r88eu_attach(struct rtwn_usb_softc *);
 void	r12au_attach(struct rtwn_usb_softc *);
 void	r21au_attach(struct rtwn_usb_softc *);
 
 enum {
 	RTWN_CHIP_RTL8192CU,
+	RTWN_CHIP_RTL8192EU,
 	RTWN_CHIP_RTL8188EU,
 	RTWN_CHIP_RTL8812AU,
 	RTWN_CHIP_RTL8821AU,
 	RTWN_CHIP_MAX_USB
 };
 
 /* various supported device vendors/products */
 static const STRUCT_USB_HOST_ID rtwn_devs[] = {
 	/* RTL8188CE-VAU/RTL8188CUS/RTL8188RU/RTL8192CU */
 #define RTWN_RTL8192CU_DEV(v,p)	\
 	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, RTWN_CHIP_RTL8192CU) }
 	RTWN_RTL8192CU_DEV(ABOCOM, RTL8188CU_1),
 	RTWN_RTL8192CU_DEV(ABOCOM, RTL8188CU_2),
 	RTWN_RTL8192CU_DEV(ABOCOM, RTL8192CU),
 	RTWN_RTL8192CU_DEV(ASUS, RTL8192CU),
 	RTWN_RTL8192CU_DEV(ASUS, USBN10NANO),
 	RTWN_RTL8192CU_DEV(AZUREWAVE, RTL8188CE_1),
 	RTWN_RTL8192CU_DEV(AZUREWAVE, RTL8188CE_2),
 	RTWN_RTL8192CU_DEV(AZUREWAVE, RTL8188CU),
 	RTWN_RTL8192CU_DEV(BELKIN, F7D2102),
 	RTWN_RTL8192CU_DEV(BELKIN, RTL8188CU),
 	RTWN_RTL8192CU_DEV(BELKIN, RTL8192CU),
 	RTWN_RTL8192CU_DEV(CHICONY, RTL8188CUS_1),
 	RTWN_RTL8192CU_DEV(CHICONY, RTL8188CUS_2),
 	RTWN_RTL8192CU_DEV(CHICONY, RTL8188CUS_3),
 	RTWN_RTL8192CU_DEV(CHICONY, RTL8188CUS_4),
 	RTWN_RTL8192CU_DEV(CHICONY, RTL8188CUS_5),
 	RTWN_RTL8192CU_DEV(COREGA, RTL8192CU),
 	RTWN_RTL8192CU_DEV(DLINK, RTL8188CU),
 	RTWN_RTL8192CU_DEV(DLINK, RTL8192CU_1),
 	RTWN_RTL8192CU_DEV(DLINK, RTL8192CU_2),
 	RTWN_RTL8192CU_DEV(DLINK, RTL8192CU_3),
 	RTWN_RTL8192CU_DEV(DLINK, DWA131B),
 	RTWN_RTL8192CU_DEV(EDIMAX, EW7811UN),
 	RTWN_RTL8192CU_DEV(EDIMAX, RTL8192CU),
 	RTWN_RTL8192CU_DEV(FEIXUN, RTL8188CU),
 	RTWN_RTL8192CU_DEV(FEIXUN, RTL8192CU),
 	RTWN_RTL8192CU_DEV(GUILLEMOT, HWNUP150),
 	RTWN_RTL8192CU_DEV(HAWKING, RTL8192CU),
 	RTWN_RTL8192CU_DEV(HP3, RTL8188CU),
 	RTWN_RTL8192CU_DEV(NETGEAR, WNA1000M),
 	RTWN_RTL8192CU_DEV(NETGEAR, RTL8192CU),
 	RTWN_RTL8192CU_DEV(NETGEAR4, RTL8188CU),
 	RTWN_RTL8192CU_DEV(NOVATECH, RTL8188CU),
 	RTWN_RTL8192CU_DEV(PLANEX2, RTL8188CU_1),
 	RTWN_RTL8192CU_DEV(PLANEX2, RTL8188CU_2),
 	RTWN_RTL8192CU_DEV(PLANEX2, RTL8188CU_3),
 	RTWN_RTL8192CU_DEV(PLANEX2, RTL8188CU_4),
 	RTWN_RTL8192CU_DEV(PLANEX2, RTL8188CUS),
 	RTWN_RTL8192CU_DEV(PLANEX2, RTL8192CU),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CE_0),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CE_1),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CTV),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CU_0),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CU_1),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CU_2),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CU_3),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CU_COMBO),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188CUS),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188RU_1),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188RU_2),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8188RU_3),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8191CU),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8192CE),
 	RTWN_RTL8192CU_DEV(REALTEK, RTL8192CU),
-	RTWN_RTL8192CU_DEV(REALTEK, RTL8192CU_1),
 	RTWN_RTL8192CU_DEV(SITECOMEU, RTL8188CU_1),
 	RTWN_RTL8192CU_DEV(SITECOMEU, RTL8188CU_2),
 	RTWN_RTL8192CU_DEV(SITECOMEU, RTL8192CU),
 	RTWN_RTL8192CU_DEV(TRENDNET, RTL8188CU),
 	RTWN_RTL8192CU_DEV(TRENDNET, RTL8192CU),
 	RTWN_RTL8192CU_DEV(ZYXEL, RTL8192CU),
 #undef RTWN_RTL8192CU_DEV
 
+	/* RTL8192EU */
+#define RTWN_RTL8192EU_DEV(v,p)	\
+	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, RTWN_CHIP_RTL8192EU) }
+	RTWN_RTL8192EU_DEV(DLINK, DWA131E1),
+	RTWN_RTL8192EU_DEV(REALTEK, RTL8192EU),
+	RTWN_RTL8192EU_DEV(TPLINK, WN822NV4),
+	RTWN_RTL8192EU_DEV(TPLINK, WN823NV2),
+#undef RTWN_RTL8192EU_DEV
+
 	/* RTL8188EU */
 #define RTWN_RTL8188EU_DEV(v,p)	\
 	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, RTWN_CHIP_RTL8188EU) }
 	RTWN_RTL8188EU_DEV(ABOCOM, RTL8188EU),
 	RTWN_RTL8188EU_DEV(DLINK, DWA123D1),
 	RTWN_RTL8188EU_DEV(DLINK, DWA125D1),
 	RTWN_RTL8188EU_DEV(ELECOM, WDC150SU2M),
 	RTWN_RTL8188EU_DEV(REALTEK, RTL8188ETV),
 	RTWN_RTL8188EU_DEV(REALTEK, RTL8188EU),
 #undef RTWN_RTL8188EU_DEV
 
 	/* RTL8812AU */
 #define RTWN_RTL8812AU_DEV(v,p)	\
 	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, RTWN_CHIP_RTL8812AU) }
 	RTWN_RTL8812AU_DEV(ASUS, USBAC56),
 	RTWN_RTL8812AU_DEV(CISCOLINKSYS, WUSB6300),
 	RTWN_RTL8812AU_DEV(DLINK, DWA182C1),
 	RTWN_RTL8812AU_DEV(DLINK, DWA180A1),
 	RTWN_RTL8812AU_DEV(EDIMAX, EW7822UAC),
 	RTWN_RTL8812AU_DEV(IODATA, WNAC867U),
 	RTWN_RTL8812AU_DEV(MELCO, WIU3866D),
 	RTWN_RTL8812AU_DEV(NEC, WL900U),
 	RTWN_RTL8812AU_DEV(PLANEX2, GW900D),
 	RTWN_RTL8812AU_DEV(SENAO, EUB1200AC),
 	RTWN_RTL8812AU_DEV(SITECOMEU, WLA7100),
 	RTWN_RTL8812AU_DEV(TPLINK, T4U),
 	RTWN_RTL8812AU_DEV(TRENDNET, TEW805UB),
 	RTWN_RTL8812AU_DEV(ZYXEL, NWD6605),
 #undef RTWN_RTL8812AU_DEV
 
 	/* RTL8821AU */
 #define RTWN_RTL8821AU_DEV(v,p)	\
 	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, RTWN_CHIP_RTL8821AU) }
 	RTWN_RTL8821AU_DEV(DLINK, DWA171A1),
 	RTWN_RTL8821AU_DEV(DLINK, DWA172A1),
 	RTWN_RTL8821AU_DEV(EDIMAX, EW7811UTC_1),
 	RTWN_RTL8821AU_DEV(EDIMAX, EW7811UTC_2),
 	RTWN_RTL8821AU_DEV(HAWKING, HD65U),
 	RTWN_RTL8821AU_DEV(MELCO, WIU2433DM),
 	RTWN_RTL8821AU_DEV(NETGEAR, A6100)
 #undef RTWN_RTL8821AU_DEV
 };
 
 typedef void	(*chip_usb_attach)(struct rtwn_usb_softc *);
 
 static const chip_usb_attach rtwn_chip_usb_attach[RTWN_CHIP_MAX_USB] = {
 	[RTWN_CHIP_RTL8192CU] = r92cu_attach,
+	[RTWN_CHIP_RTL8192EU] = r92eu_attach,
 	[RTWN_CHIP_RTL8188EU] = r88eu_attach,
 	[RTWN_CHIP_RTL8812AU] = r12au_attach,
 	[RTWN_CHIP_RTL8821AU] = r21au_attach
 };
 
 static __inline void
 rtwn_usb_attach_private(struct rtwn_usb_softc *uc, int chip)
 {
 	rtwn_chip_usb_attach[chip](uc);
 }
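The rtwn_devs[] table and the rtwn_chip_usb_attach[] array above split device matching from chip-specific setup: USB_VPI() stores the RTWN_CHIP_* constant in the entry's driver_info field, so a single generic probe routine can serve every chip and the matched id later indexes the attach-function array. A sketch of how such a table is typically consumed in FreeBSD USB drivers (the real probe/attach logic lives in rtwn_usb_attach.c and is more involved):

	/*
	 * Sketch: table-driven probe in the common FreeBSD USB idiom;
	 * shown for orientation only, not the driver's actual code.
	 */
	static int
	rtwn_usb_probe_sketch(device_t self)
	{
		struct usb_attach_arg *uaa = device_get_ivars(self);

		if (uaa->usb_mode != USB_MODE_HOST)
			return (ENXIO);

		/* Match VID/PID against the table above. */
		return (usbd_lookup_id_by_uaa(rtwn_devs, sizeof(rtwn_devs), uaa));
	}

	/*
	 * At attach time the matched entry's driver_info selects the
	 * chip setup, e.g.:
	 *
	 *	rtwn_usb_attach_private(uc, USB_GET_DRIVER_INFO(uaa));
	 *
	 * which indexes rtwn_chip_usb_attach[] and runs r92eu_attach()
	 * for the newly added RTL8192EU entries.
	 */

This design is why adding RTL8192EU support only touches three places in the header: one prototype, one enum constant, and one table slot.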
Index: projects/clang400-import/sys/dev/usb/usbdevs
===================================================================
--- projects/clang400-import/sys/dev/usb/usbdevs	(revision 312719)
+++ projects/clang400-import/sys/dev/usb/usbdevs	(revision 312720)
@@ -1,4742 +1,4744 @@
 $FreeBSD$
 /* $NetBSD: usbdevs,v 1.392 2004/12/29 08:38:44 imp Exp $ */
 
 /*-
  * Copyright (c) 1998-2004 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Lennart Augustsson (lennart@augustsson.net) at
  * Carlstedt Research & Technology.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * List of known USB vendors
  *
  * USB.org publishes a VID list of USB-IF member companies at
  * http://www.usb.org/developers/tools
  * Note that it does not show companies that have obtained a Vendor ID
  * without becoming full members.
  *
  * Please note that these IDs do not do anything. Adding an ID here and
  * regenerating the usbdevs.h and usbdevs_data.h only makes a symbolic name
  * available to the source code and does not change any functionality, nor
  * does it make your device available to a specific driver.
  * It will however make the descriptive string available if a device does not
  * provide the string itself.
  *
  * After adding a vendor ID VNDR and a product ID PRDCT you will have the
  * following extra defines:
  * #define USB_VENDOR_VNDR		0x????
  * #define USB_PRODUCT_VNDR_PRDCT	0x????
  *
  * You may have to add these defines to the respective probe routines to
  * make the device recognised by the appropriate device driver.
  */
 
 vendor UNKNOWN1		0x0053	Unknown vendor
 vendor UNKNOWN2		0x0105	Unknown vendor
 vendor EGALAX2		0x0123	eGalax, Inc.
 vendor CHIPSBANK	0x0204	Chipsbank Microelectronics Co.
 vendor HUMAX		0x02ad	HUMAX
 vendor LTS		0x0386	LTS
 vendor BWCT		0x03da	Bernd Walter Computer Technology
 vendor AOX		0x03e8	AOX
 vendor THESYS		0x03e9	Thesys
 vendor DATABROADCAST	0x03ea	Data Broadcasting
 vendor ATMEL		0x03eb	Atmel
 vendor IWATSU		0x03ec	Iwatsu America
 vendor MITSUMI		0x03ee	Mitsumi
 vendor HP		0x03f0	Hewlett Packard
 vendor GENOA		0x03f1	Genoa
 vendor OAK		0x03f2	Oak
 vendor ADAPTEC		0x03f3	Adaptec
 vendor DIEBOLD		0x03f4	Diebold
 vendor SIEMENSELECTRO	0x03f5	Siemens Electromechanical
 vendor EPSONIMAGING	0x03f8	Epson Imaging
 vendor KEYTRONIC	0x03f9	KeyTronic
 vendor OPTI		0x03fb	OPTi
 vendor ELITEGROUP	0x03fc	Elitegroup
 vendor XILINX		0x03fd	Xilinx
 vendor FARALLON		0x03fe	Farallon Communications
 vendor NATIONAL		0x0400	National Semiconductor
 vendor NATIONALREG	0x0401	National Registry
 vendor ACERLABS		0x0402	Acer Labs
 vendor FTDI		0x0403	Future Technology Devices
 vendor NCR		0x0404	NCR
 vendor SYNOPSYS2	0x0405	Synopsys
 vendor FUJITSUICL	0x0406	Fujitsu-ICL
 vendor FUJITSU2		0x0407	Fujitsu Personal Systems
 vendor QUANTA		0x0408	Quanta
 vendor NEC		0x0409	NEC
 vendor KODAK		0x040a	Eastman Kodak
 vendor WELTREND		0x040b	Weltrend
 vendor VIA		0x040d	VIA
 vendor MCCI		0x040e	MCCI
 vendor MELCO		0x0411	Melco
 vendor LEADTEK		0x0413	Leadtek
 vendor WINBOND		0x0416	Winbond
 vendor PHOENIX		0x041a	Phoenix
 vendor CREATIVE		0x041e	Creative Labs
 vendor NOKIA		0x0421	Nokia
 vendor ADI		0x0422	ADI Systems
 vendor CATC		0x0423	Computer Access Technology
 vendor SMC2		0x0424	Standard Microsystems
 vendor MOTOROLA_HK	0x0425	Motorola HK
 vendor GRAVIS		0x0428	Advanced Gravis Computer
 vendor CIRRUSLOGIC	0x0429	Cirrus Logic
 vendor INNOVATIVE	0x042c	Innovative Semiconductors
 vendor MOLEX		0x042f	Molex
 vendor SUN		0x0430	Sun Microsystems
 vendor UNISYS		0x0432	Unisys
 vendor TAUGA		0x0436	Taugagreining HF
 vendor AMD		0x0438	Advanced Micro Devices
 vendor LEXMARK		0x043d	Lexmark International
 vendor LG		0x043e	LG Electronics
 vendor NANAO		0x0440	NANAO
 vendor GATEWAY		0x0443
Gateway 2000 vendor NMB 0x0446 NMB vendor ALPS 0x044e Alps Electric vendor THRUST 0x044f Thrustmaster vendor TI 0x0451 Texas Instruments vendor ANALOGDEVICES 0x0456 Analog Devices vendor SIS 0x0457 Silicon Integrated Systems Corp. vendor KYE 0x0458 KYE Systems vendor DIAMOND2 0x045a Diamond (Supra) vendor RENESAS 0x045b Renesas vendor MICROSOFT 0x045e Microsoft vendor PRIMAX 0x0461 Primax Electronics vendor MGE 0x0463 MGE UPS Systems vendor AMP 0x0464 AMP vendor CHERRY 0x046a Cherry Mikroschalter vendor MEGATRENDS 0x046b American Megatrends vendor LOGITECH 0x046d Logitech vendor BTC 0x046e Behavior Tech. Computer vendor PHILIPS 0x0471 Philips vendor SUN2 0x0472 Sun Microsystems (official) vendor SANYO 0x0474 Sanyo Electric vendor SEAGATE 0x0477 Seagate vendor CONNECTIX 0x0478 Connectix vendor SEMTECH 0x047a Semtech vendor KENSINGTON 0x047d Kensington vendor LUCENT 0x047e Lucent vendor PLANTRONICS 0x047f Plantronics vendor KYOCERA 0x0482 Kyocera Wireless Corp. vendor STMICRO 0x0483 STMicroelectronics vendor FOXCONN 0x0489 Foxconn vendor MEIZU 0x0492 Meizu Electronics vendor YAMAHA 0x0499 YAMAHA vendor COMPAQ 0x049f Compaq vendor HITACHI 0x04a4 Hitachi vendor ACERP 0x04a5 Acer Peripherals vendor DAVICOM 0x04a6 Davicom vendor VISIONEER 0x04a7 Visioneer vendor CANON 0x04a9 Canon vendor NIKON 0x04b0 Nikon vendor PAN 0x04b1 Pan International vendor IBM 0x04b3 IBM vendor CYPRESS 0x04b4 Cypress Semiconductor vendor ROHM 0x04b5 ROHM vendor COMPAL 0x04b7 Compal vendor EPSON 0x04b8 Seiko Epson vendor RAINBOW 0x04b9 Rainbow Technologies vendor IODATA 0x04bb I-O Data vendor TDK 0x04bf TDK vendor 3COMUSR 0x04c1 U.S. Robotics vendor METHODE 0x04c2 Methode Electronics Far East vendor MAXISWITCH 0x04c3 Maxi Switch vendor LOCKHEEDMER 0x04c4 Lockheed Martin Energy Research vendor FUJITSU 0x04c5 Fujitsu vendor TOSHIBAAM 0x04c6 Toshiba America vendor MICROMACRO 0x04c7 Micro Macro Technologies vendor KONICA 0x04c8 Konica vendor LITEON 0x04ca Lite-On Technology vendor FUJIPHOTO 0x04cb Fuji Photo Film vendor PHILIPSSEMI 0x04cc Philips Semiconductors vendor TATUNG 0x04cd Tatung Co. Of America vendor SCANLOGIC 0x04ce ScanLogic vendor MYSON 0x04cf Myson Technology vendor DIGI2 0x04d0 Digi vendor ITTCANON 0x04d1 ITT Canon vendor ALTEC 0x04d2 Altec Lansing vendor LSI 0x04d4 LSI vendor MENTORGRAPHICS 0x04d6 Mentor Graphics vendor ITUNERNET 0x04d8 I-Tuner Networks vendor HOLTEK 0x04d9 Holtek Semiconductor, Inc. 
vendor PANASONIC 0x04da Panasonic (Matsushita) vendor HUANHSIN 0x04dc Huan Hsin vendor SHARP 0x04dd Sharp vendor IIYAMA 0x04e1 Iiyama vendor SHUTTLE 0x04e6 Shuttle Technology vendor ELO 0x04e7 Elo TouchSystems vendor SAMSUNG 0x04e8 Samsung Electronics vendor NORTHSTAR 0x04eb Northstar vendor TOKYOELECTRON 0x04ec Tokyo Electron vendor ANNABOOKS 0x04ed Annabooks vendor JVC 0x04f1 JVC vendor CHICONY 0x04f2 Chicony Electronics vendor ELAN 0x04f3 Elan vendor NEWNEX 0x04f7 Newnex vendor BROTHER 0x04f9 Brother Industries vendor DALLAS 0x04fa Dallas Semiconductor vendor AIPTEK2 0x04fc AIPTEK International vendor PFU 0x04fe PFU vendor FUJIKURA 0x0501 Fujikura/DDK vendor ACER 0x0502 Acer vendor 3COM 0x0506 3Com vendor HOSIDEN 0x0507 Hosiden Corporation vendor AZTECH 0x0509 Aztech Systems vendor BELKIN 0x050d Belkin Components vendor KAWATSU 0x050f Kawatsu Semiconductor vendor FCI 0x0514 FCI vendor LONGWELL 0x0516 Longwell vendor COMPOSITE 0x0518 Composite vendor STAR 0x0519 Star Micronics vendor APC 0x051d American Power Conversion vendor SCIATLANTA 0x051e Scientific Atlanta vendor TSM 0x0520 TSM vendor CONNECTEK 0x0522 Advanced Connectek USA vendor NETCHIP 0x0525 NetChip Technology vendor ALTRA 0x0527 ALTRA vendor ATI 0x0528 ATI Technologies vendor AKS 0x0529 Aladdin Knowledge Systems vendor TEKOM 0x052b Tekom vendor CANONDEV 0x052c Canon vendor WACOMTECH 0x0531 Wacom vendor INVENTEC 0x0537 Inventec vendor SHYHSHIUN 0x0539 Shyh Shiun Terminals vendor PREHWERKE 0x053a Preh Werke Gmbh & Co. KG vendor SYNOPSYS 0x053f Synopsys vendor UNIACCESS 0x0540 Universal Access vendor VIEWSONIC 0x0543 ViewSonic vendor XIRLINK 0x0545 Xirlink vendor ANCHOR 0x0547 Anchor Chips vendor SONY 0x054c Sony vendor FUJIXEROX 0x0550 Fuji Xerox vendor VISION 0x0553 VLSI Vision vendor ASAHIKASEI 0x0556 Asahi Kasei Microsystems vendor ATEN 0x0557 ATEN International vendor SAMSUNG2 0x055d Samsung Electronics vendor MUSTEK 0x055f Mustek Systems vendor TELEX 0x0562 Telex Communications vendor CHINON 0x0564 Chinon vendor PERACOM 0x0565 Peracom Networks vendor ALCOR2 0x0566 Alcor Micro vendor XYRATEX 0x0567 Xyratex vendor WACOM 0x056a WACOM vendor ETEK 0x056c e-TEK Labs vendor EIZO 0x056d EIZO vendor ELECOM 0x056e Elecom vendor CONEXANT 0x0572 Conexant vendor HAUPPAUGE 0x0573 Hauppauge Computer Works vendor BAFO 0x0576 BAFO/Quality Computer Accessories vendor YEDATA 0x057b Y-E Data vendor AVM 0x057c AVM vendor QUICKSHOT 0x057f Quickshot vendor ROLAND 0x0582 Roland vendor ROCKFIRE 0x0583 Rockfire vendor RATOC 0x0584 RATOC Systems vendor ZYXEL 0x0586 ZyXEL Communication vendor INFINEON 0x058b Infineon vendor MICREL 0x058d Micrel vendor ALCOR 0x058f Alcor Micro vendor OMRON 0x0590 OMRON vendor ZORAN 0x0595 Zoran Microelectronics vendor NIIGATA 0x0598 Niigata vendor IOMEGA 0x059b Iomega vendor ATREND 0x059c A-Trend Technology vendor AID 0x059d Advanced Input Devices vendor LACIE 0x059f LaCie vendor FUJIFILM 0x05a2 Fuji Film vendor ARC 0x05a3 ARC vendor ORTEK 0x05a4 Ortek vendor CISCOLINKSYS3 0x05a6 Cisco-Linksys vendor BOSE 0x05a7 Bose vendor OMNIVISION 0x05a9 OmniVision vendor INSYSTEM 0x05ab In-System Design vendor APPLE 0x05ac Apple Computer vendor YCCABLE 0x05ad Y.C. 
Cable vendor DIGITALPERSONA 0x05ba DigitalPersona vendor 3G 0x05bc 3G Green Green Globe vendor RAFI 0x05bd RAFI vendor TYCO 0x05be Tyco vendor KAWASAKI 0x05c1 Kawasaki vendor DIGI 0x05c5 Digi International vendor QUALCOMM2 0x05c6 Qualcomm vendor QTRONIX 0x05c7 Qtronix vendor FOXLINK 0x05c8 Foxlink vendor RICOH 0x05ca Ricoh vendor ELSA 0x05cc ELSA vendor SCIWORX 0x05ce sci-worx vendor BRAINBOXES 0x05d1 Brainboxes Limited vendor ULTIMA 0x05d8 Ultima vendor AXIOHM 0x05d9 Axiohm Transaction Solutions vendor MICROTEK 0x05da Microtek vendor SUNTAC 0x05db SUN Corporation vendor LEXAR 0x05dc Lexar Media vendor ADDTRON 0x05dd Addtron vendor SYMBOL 0x05e0 Symbol Technologies vendor SYNTEK 0x05e1 Syntek vendor GENESYS 0x05e3 Genesys Logic vendor FUJI 0x05e5 Fuji Electric vendor KEITHLEY 0x05e6 Keithley Instruments vendor EIZONANAO 0x05e7 EIZO Nanao vendor KLSI 0x05e9 Kawasaki LSI vendor FFC 0x05eb FFC vendor ANKO 0x05ef Anko Electronic vendor PIENGINEERING 0x05f3 P.I. Engineering vendor AOC 0x05f6 AOC International vendor CHIC 0x05fe Chic Technology vendor BARCO 0x0600 Barco Display Systems vendor BRIDGE 0x0607 Bridge Information vendor SOLIDYEAR 0x060b Solid Year vendor BIORAD 0x0614 Bio-Rad Laboratories vendor MACALLY 0x0618 Macally vendor ACTLABS 0x061c Act Labs vendor ALARIS 0x0620 Alaris vendor APEX 0x0624 Apex vendor CREATIVE3 0x062a Creative Labs vendor MICRON 0x0634 Micron Technology vendor VIVITAR 0x0636 Vivitar vendor GUNZE 0x0637 Gunze Electronics USA vendor AVISION 0x0638 Avision vendor TEAC 0x0644 TEAC vendor ACTON 0x0647 Acton Research Corp. vendor OPTO 0x065a Optoelectronics Co., Ltd vendor SGI 0x065e Silicon Graphics vendor SANWASUPPLY 0x0663 Sanwa Supply vendor MEGATEC 0x0665 Megatec vendor LINKSYS 0x066b Linksys vendor ACERSA 0x066e Acer Semiconductor America vendor SIGMATEL 0x066f Sigmatel vendor DRAYTEK 0x0675 DrayTek vendor AIWA 0x0677 Aiwa vendor ACARD 0x0678 ACARD Technology vendor PROLIFIC 0x067b Prolific Technology vendor SIEMENS 0x067c Siemens vendor AVANCELOGIC 0x0680 Avance Logic vendor SIEMENS2 0x0681 Siemens vendor MINOLTA 0x0686 Minolta vendor CHPRODUCTS 0x068e CH Products vendor HAGIWARA 0x0693 Hagiwara Sys-Com vendor CTX 0x0698 Chuntex vendor ASKEY 0x069a Askey Computer vendor SAITEK 0x06a3 Saitek vendor ALCATELT 0x06b9 Alcatel Telecom vendor AGFA 0x06bd AGFA-Gevaert vendor ASIAMD 0x06be Asia Microelectronic Development vendor BIZLINK 0x06c4 Bizlink International vendor KEYSPAN 0x06cd Keyspan / InnoSys Inc. vendor CONTEC 0x06ce Contec products vendor AASHIMA 0x06d6 Aashima Technology vendor LIEBERT 0x06da Liebert vendor MULTITECH 0x06e0 MultiTech vendor ADS 0x06e1 ADS Technologies vendor ALCATELM 0x06e4 Alcatel Microelectronics vendor SIRIUS 0x06ea Sirius Technologies vendor GUILLEMOT 0x06f8 Guillemot vendor BOSTON 0x06fd Boston Acoustics vendor SMC 0x0707 Standard Microsystems vendor PUTERCOM 0x0708 Putercom vendor MCT 0x0711 MCT vendor IMATION 0x0718 Imation vendor TECLAST 0x071b Teclast vendor SONYERICSSON 0x0731 Sony Ericsson vendor EICON 0x0734 Eicon Networks vendor SYNTECH 0x0745 Syntech Information vendor DIGITALSTREAM 0x074e Digital Stream vendor AUREAL 0x0755 Aureal Semiconductor vendor MAUDIO 0x0763 M-Audio vendor CYBERPOWER 0x0764 Cyber Power Systems, Inc. 
vendor SURECOM 0x0769 Surecom Technology vendor HIDGLOBAL 0x076b HID Global vendor LINKSYS2 0x077b Linksys vendor GRIFFIN 0x077d Griffin Technology vendor SANDISK 0x0781 SanDisk vendor JENOPTIK 0x0784 Jenoptik vendor LOGITEC 0x0789 Logitec vendor NOKIA2 0x078b Nokia vendor BRIMAX 0x078e Brimax vendor AXIS 0x0792 Axis Communications vendor ABL 0x0794 ABL Electronics vendor SAGEM 0x079b Sagem vendor SUNCOMM 0x079c Sun Communications, Inc. vendor ALFADATA 0x079d Alfadata Computer vendor NATIONALTECH 0x07a2 National Technical Systems vendor ONNTO 0x07a3 Onnto vendor BE 0x07a4 Be vendor ADMTEK 0x07a6 ADMtek vendor COREGA 0x07aa Corega vendor FREECOM 0x07ab Freecom vendor MICROTECH 0x07af Microtech vendor GENERALINSTMNTS 0x07b2 General Instruments (Motorola) vendor OLYMPUS 0x07b4 Olympus vendor ABOCOM 0x07b8 AboCom Systems vendor KEISOKUGIKEN 0x07c1 Keisokugiken vendor ONSPEC 0x07c4 OnSpec vendor APG 0x07c5 APG Cash Drawer vendor BUG 0x07c8 B.U.G. vendor ALLIEDTELESYN 0x07c9 Allied Telesyn International vendor AVERMEDIA 0x07ca AVerMedia Technologies vendor SIIG 0x07cc SIIG vendor CASIO 0x07cf CASIO vendor DLINK2 0x07d1 D-Link vendor APTIO 0x07d2 Aptio Products vendor ARASAN 0x07da Arasan Chip Systems vendor ALLIEDCABLE 0x07e6 Allied Cable vendor STSN 0x07ef STSN vendor CENTURY 0x07f7 Century Corp vendor NEWLINK 0x07ff NEWlink vendor MAGTEK 0x0801 Mag-Tek vendor ZOOM 0x0803 Zoom Telephonics vendor PCS 0x0810 Personal Communication Systems vendor ALPHASMART 0x081e AlphaSmart, Inc. vendor BROADLOGIC 0x0827 BroadLogic vendor HANDSPRING 0x082d Handspring vendor PALM 0x0830 Palm Computing vendor SOURCENEXT 0x0833 SOURCENEXT vendor ACTIONSTAR 0x0835 Action Star Enterprise vendor SAMSUNG_TECHWIN 0x0839 Samsung Techwin vendor ACCTON 0x083a Accton Technology vendor DIAMOND 0x0841 Diamond vendor NETGEAR 0x0846 BayNETGEAR vendor TOPRE 0x0853 Topre Corporation vendor ACTIVEWIRE 0x0854 ActiveWire vendor BBELECTRONICS 0x0856 B&B Electronics vendor PORTGEAR 0x085a PortGear vendor NETGEAR2 0x0864 Netgear vendor SYSTEMTALKS 0x086e System Talks vendor METRICOM 0x0870 Metricom vendor ADESSOKBTEK 0x087c ADESSO/Kbtek America vendor JATON 0x087d Jaton vendor APT 0x0880 APT Technologies vendor BOCARESEARCH 0x0885 Boca Research vendor ANDREA 0x08a8 Andrea Electronics vendor BURRBROWN 0x08bb Burr-Brown Japan vendor 2WIRE 0x08c8 2Wire vendor AIPTEK 0x08ca AIPTEK International vendor SMARTBRIDGES 0x08d1 SmartBridges vendor FUJITSUSIEMENS 0x08d4 Fujitsu-Siemens vendor BILLIONTON 0x08dd Billionton Systems vendor GEMALTO 0x08e6 Gemalto SA vendor EXTENDED 0x08e9 Extended Systems vendor MSYSTEMS 0x08ec M-Systems vendor DIGIANSWER 0x08fd Digianswer vendor AUTHENTEC 0x08ff AuthenTec vendor AUDIOTECHNICA 0x0909 Audio-Technica vendor TRUMPION 0x090a Trumpion Microelectronics vendor FEIYA 0x090c Feiya vendor ALATION 0x0910 Alation Systems vendor GLOBESPAN 0x0915 Globespan vendor CONCORDCAMERA 0x0919 Concord Camera vendor GARMIN 0x091e Garmin International vendor GOHUBS 0x0921 GoHubs vendor DYMO 0x0922 DYMO vendor XEROX 0x0924 Xerox vendor BIOMETRIC 0x0929 American Biometric Company vendor TOSHIBA 0x0930 Toshiba vendor PLEXTOR 0x093b Plextor vendor INTREPIDCS 0x093c Intrepid vendor YANO 0x094f Yano vendor KINGSTON 0x0951 Kingston Technology vendor BLUEWATER 0x0956 BlueWater Systems vendor AGILENT 0x0957 Agilent Technologies vendor GUDE 0x0959 Gude ADS vendor PORTSMITH 0x095a Portsmith vendor ACERW 0x0967 Acer vendor ADIRONDACK 0x0976 Adirondack Wire & Cable vendor BECKHOFF 0x0978 Beckhoff vendor MINDSATWORK 0x097a Minds At Work 
vendor POINTCHIPS 0x09a6 PointChips vendor INTERSIL 0x09aa Intersil vendor ALTIUS 0x09b3 Altius Solutions vendor ARRIS 0x09c1 Arris Interactive vendor ACTIVCARD 0x09c3 ACTIVCARD vendor ACTISYS 0x09c4 ACTiSYS vendor NOVATEL2 0x09d7 Novatel Wireless vendor AFOURTECH 0x09da A-FOUR TECH vendor AIMEX 0x09dc AIMEX vendor ADDONICS 0x09df Addonics Technologies vendor AKAI 0x09e8 AKAI professional M.I. vendor ARESCOM 0x09f5 ARESCOM vendor BAY 0x09f9 Bay Associates vendor ALTERA 0x09fb Altera vendor CSR 0x0a12 Cambridge Silicon Radio vendor TREK 0x0a16 Trek Technology vendor ASAHIOPTICAL 0x0a17 Asahi Optical vendor BOCASYSTEMS 0x0a43 Boca Systems vendor SHANTOU 0x0a46 ShanTou vendor MEDIAGEAR 0x0a48 MediaGear vendor BROADCOM 0x0a5c Broadcom vendor GREENHOUSE 0x0a6b GREENHOUSE vendor MEDELI 0x0a67 Medeli vendor GEOCAST 0x0a79 Geocast Network Systems vendor EGO 0x0a92 EGO systems vendor IDQUANTIQUE 0x0aba ID Quantique vendor IDTECH 0x0acd ID TECH vendor ZYDAS 0x0ace Zydas Technology Corporation vendor NEODIO 0x0aec Neodio vendor OPTION 0x0af0 Option N.V. vendor ASUS 0x0b05 ASUSTeK Computer vendor TODOS 0x0b0c Todos Data System vendor SIIG2 0x0b39 SIIG vendor TEKRAM 0x0b3b Tekram Technology vendor HAL 0x0b41 HAL Corporation vendor EMS 0x0b43 EMS Production vendor NEC2 0x0b62 NEC vendor ADLINK 0x0b63 ADLINK Technology, Inc. vendor ATI2 0x0b6f ATI vendor ZEEVO 0x0b7a Zeevo, Inc. vendor KURUSUGAWA 0x0b7e Kurusugawa Electronics, Inc. vendor SMART 0x0b8c Smart Technologies vendor ASIX 0x0b95 ASIX Electronics vendor O2MICRO 0x0b97 O2 Micro, Inc. vendor USR 0x0baf U.S. Robotics vendor AMBIT 0x0bb2 Ambit Microsystems vendor HTC 0x0bb4 HTC vendor REALTEK 0x0bda Realtek vendor ERICSSON2 0x0bdb Ericsson vendor MEI 0x0bed MEI vendor ADDONICS2 0x0bf6 Addonics Technology vendor FSC 0x0bf8 Fujitsu Siemens Computers vendor AGATE 0x0c08 Agate Technologies vendor DMI 0x0c0b DMI vendor CANYON 0x0c10 Canyon vendor ICOM 0x0c26 Icom Inc. vendor GNOTOMETRICS 0x0c33 GN Otometrics vendor CHICONY2 0x0c45 Chicony / Microdia / Sonix Technology Co., Ltd. vendor REINERSCT 0x0c4b Reiner-SCT vendor SEALEVEL 0x0c52 Sealevel System vendor JETI 0x0c6c Jeti vendor LUWEN 0x0c76 Luwen vendor ELEKTOR 0x0c7d ELEKTOR Electronics vendor KYOCERA2 0x0c88 Kyocera Wireless Corp. vendor ZCOM 0x0cde Z-Com vendor ATHEROS2 0x0cf3 Atheros Communications vendor POSIFLEX 0x0d3a POSIFLEX vendor TANGTOP 0x0d3d Tangtop vendor KOBIL 0x0d46 KOBIL vendor SMC3 0x0d5c Standard Microsystems vendor ADDON 0x0d7d Add-on Technology vendor ACDC 0x0d7e American Computer & Digital Components vendor CMEDIA 0x0d8c CMEDIA vendor CONCEPTRONIC 0x0d8e Conceptronic vendor SKANHEX 0x0d96 Skanhex Technology, Inc. vendor MSI 0x0db0 Micro Star International vendor ELCON 0x0db7 ELCON Systemtechnik vendor UNKNOWN4 0x0dcd Unknown vendor vendor NETAC 0x0dd8 Netac vendor SITECOMEU 0x0df6 Sitecom Europe vendor MOBILEACTION 0x0df7 Mobile Action vendor AMIGO 0x0e0b Amigo Technology vendor SPEEDDRAGON 0x0e55 Speed Dragon Multimedia vendor HAWKING 0x0e66 Hawking vendor FOSSIL 0x0e67 Fossil, Inc vendor GMATE 0x0e7e G.Mate, Inc vendor MEDIATEK 0x0e8d MediaTek, Inc. vendor OTI 0x0ea0 Ours Technology vendor YISO 0x0eab Yiso Wireless Co. vendor PILOTECH 0x0eaf Pilotech vendor NOVATECH 0x0eb0 NovaTech vendor ITEGNO 0x0eba iTegno vendor WINMAXGROUP 0x0ed1 WinMaxGroup vendor TOD 0x0ede TOD vendor EGALAX 0x0eef eGalax, Inc. vendor AIRPRIME 0x0f3d AirPrime, Inc.
vendor MICROTUNE 0x0f4d Microtune vendor VTECH 0x0f88 VTech vendor FALCOM 0x0f94 Falcom Wireless Communications GmbH vendor RIM 0x0fca Research In Motion vendor DYNASTREAM 0x0fcf Dynastream Innovations vendor LARSENBRUSGAARD 0x0fd8 Larsen and Brusgaard vendor OWL 0x0fde OWL vendor KONTRON 0x0fe6 Kontron AG vendor QUALCOMM 0x1004 Qualcomm vendor APACER 0x1005 Apacer vendor MOTOROLA4 0x100d Motorola vendor HP3 0x103c Hewlett Packard vendor AIRPLUS 0x1011 Airplus vendor DESKNOTE 0x1019 Desknote vendor NEC3 0x1033 NEC vendor TTI 0x103e Thurlby Thandar Instruments vendor GIGABYTE 0x1044 GIGABYTE vendor WESTERN 0x1058 Western Digital vendor MOTOROLA 0x1063 Motorola vendor CCYU 0x1065 CCYU Technology vendor CURITEL 0x106c Curitel Communications Inc vendor SILABS2 0x10a6 SILABS2 vendor USI 0x10ab USI vendor LIEBERT2 0x10af Liebert vendor PLX 0x10b5 PLX vendor ASANTE 0x10bd Asante vendor SILABS 0x10c4 Silicon Labs vendor SILABS3 0x10c5 Silicon Labs vendor SILABS4 0x10ce Silicon Labs vendor ACTIONS 0x10d6 Actions vendor ANALOG 0x1110 Analog Devices vendor TENX 0x1130 Ten X Technology, Inc. vendor ISSC 0x1131 Integrated System Solution Corp. vendor JRC 0x1145 Japan Radio Company vendor SPHAIRON 0x114b Sphairon Access Systems GmbH vendor DELORME 0x1163 DeLorme vendor SERVERWORKS 0x1166 ServerWorks vendor DLINK3 0x1186 D-Link vendor ACERCM 0x1189 Acer Communications & Multimedia vendor SIERRA 0x1199 Sierra Wireless vendor SANWA 0x11ad Sanwa Electric Instrument Co., Ltd. vendor TOPFIELD 0x11db Topfield Co., Ltd vendor SIEMENS3 0x11f5 Siemens vendor NETINDEX 0x11f6 NetIndex vendor ALCATEL 0x11f7 Alcatel vendor INTERBIOMETRICS 0x1209 Interbiometrics vendor UNKNOWN3 0x1233 Unknown vendor vendor TSUNAMI 0x1241 Tsunami vendor PHEENET 0x124a Pheenet vendor TARGUS 0x1267 Targus vendor TWINMOS 0x126f TwinMOS vendor TENDA 0x1286 Tenda vendor TESTO 0x128d Testo products vendor CREATIVE2 0x1292 Creative Labs vendor BELKIN2 0x1293 Belkin Components vendor CYBERTAN 0x129b CyberTAN Technology vendor HUAWEI 0x12d1 Huawei Technologies vendor ARANEUS 0x12d8 Araneus Information Systems vendor TAPWAVE 0x12ef Tapwave vendor AINCOMM 0x12fd Aincomm vendor MOBILITY 0x1342 Mobility vendor DICKSMITH 0x1371 Dick Smith Electronics vendor NETGEAR3 0x1385 Netgear vendor BALTECH 0x13ad Baltech vendor CISCOLINKSYS 0x13b1 Cisco-Linksys vendor SHARK 0x13d2 Shark vendor AZUREWAVE 0x13d3 AzureWave vendor INITIO 0x13fd Initio Corporation vendor EMTEC 0x13fe Emtec vendor NOVATEL 0x1410 Novatel Wireless vendor MERLIN 0x1416 Merlin vendor REDOCTANE 0x1430 RedOctane vendor WISTRONNEWEB 0x1435 Wistron NeWeb vendor RADIOSHACK 0x1453 Radio Shack vendor FIC 0x1457 FIC / OpenMoko vendor HUAWEI3COM 0x1472 Huawei-3Com vendor ABOCOM2 0x1482 AboCom Systems vendor SILICOM 0x1485 Silicom vendor RALINK 0x148f Ralink Technology vendor IMAGINATION 0x149a Imagination Technologies vendor ATP 0x14af ATP Electronics vendor CONCEPTRONIC2 0x14b2 Conceptronic vendor SUPERTOP 0x14cd Super Top vendor PLANEX3 0x14ea Planex Communications vendor SILICONPORTALS 0x1527 Silicon Portals vendor UBIQUAM 0x1529 UBIQUAM Co., Ltd. vendor JMICRON 0x152d JMicron vendor UBLOX 0x1546 U-blox vendor PNY 0x154b PNY vendor OWEN 0x1555 Owen vendor OQO 0x1557 OQO vendor UMEDIA 0x157e U-MEDIA Communications vendor FIBERLINE 0x1582 Fiberline vendor FREESCALE 0x15a2 Freescale Semiconductor, Inc. vendor AFATECH 0x15a4 Afatech Technologies, Inc. vendor SPARKLAN 0x15a9 SparkLAN vendor OLIMEX 0x15ba Olimex vendor SOUNDGRAPH 0x15c2 Soundgraph, Inc.
vendor AMIT2 0x15c5 AMIT vendor TEXTECH 0x15ca Textech International Ltd. vendor SOHOWARE 0x15e8 SOHOware vendor UMAX 0x1606 UMAX Data Systems vendor INSIDEOUT 0x1608 Inside Out Networks vendor AMOI 0x1614 Amoi Electronics vendor GOODWAY 0x1631 Good Way Technology vendor ENTREGA 0x1645 Entrega vendor ACTIONTEC 0x1668 Actiontec Electronics vendor CLIPSAL 0x166a Clipsal vendor CISCOLINKSYS2 0x167b Cisco-Linksys vendor ATHEROS 0x168c Atheros Communications vendor GIGASET 0x1690 Gigaset vendor GLOBALSUN 0x16ab Global Sun Technology vendor ANYDATA 0x16d5 AnyDATA Corporation vendor JABLOTRON 0x16d6 Jablotron vendor CMOTECH 0x16d8 C-motech vendor WIENERPLEINBAUS 0x16dc WIENER Plein & Baus GmbH. vendor AXESSTEL 0x1726 Axesstel Co., Ltd. vendor LINKSYS4 0x1737 Linksys vendor SENAO 0x1740 Senao vendor ASUS2 0x1761 ASUS vendor SWEEX2 0x177f Sweex vendor METAGEEK 0x1781 MetaGeek vendor KAMSTRUP 0x17a8 Kamstrup A/S vendor DISPLAYLINK 0x17e9 DisplayLink vendor LENOVO 0x17ef Lenovo vendor WAVESENSE 0x17f4 WaveSense vendor VAISALA 0x1843 Vaisala vendor AMIT 0x18c5 AMIT vendor GOOGLE 0x18d1 Google vendor QCOM 0x18e8 Qcom vendor ELV 0x18ef ELV vendor LINKSYS3 0x1915 Linksys vendor QUALCOMMINC 0x19d2 Qualcomm, Incorporated vendor QUALCOMM3 0x19f5 Qualcomm, Inc. vendor BAYER 0x1a79 Bayer vendor WCH2 0x1a86 QinHeng Electronics vendor STELERA 0x1a8d Stelera Wireless vendor SEL 0x1adb Schweitzer Engineering Laboratories vendor CORSAIR 0x1b1c Corsair vendor MATRIXORBITAL 0x1b3d Matrix Orbital vendor OVISLINK 0x1b75 OvisLink vendor TML 0x1b91 The Mobility Lab vendor TCTMOBILE 0x1bbb TCT Mobile vendor ALTI2 0x1bc9 Alti-2 products vendor SUNPLUS 0x1bcf Sunplus Innovation Technology Inc. vendor WAGO 0x1be3 WAGO Kontakttechnik GmbH. vendor TELIT 0x1bc7 Telit vendor IONICS 0x1c0c Ionics PlugComputer vendor LONGCHEER 0x1c9e Longcheer Holdings, Ltd. vendor MPMAN 0x1cae MpMan vendor DRESDENELEKTRONIK 0x1cf1 dresden elektronik vendor NEOTEL 0x1d09 Neotel vendor DREAMLINK 0x1d34 Dream Link vendor PEGATRON 0x1d4d Pegatron vendor QISDA 0x1da5 Qisda vendor METAGEEK2 0x1dd5 MetaGeek vendor ALINK 0x1e0e Alink vendor AIRTIES 0x1eda AirTies vendor FESTO 0x1e29 Festo vendor LAKESHORE 0x1fb9 Lake Shore Cryotronics, Inc. vendor VERTEX 0x1fe7 Vertex Wireless Co., Ltd. vendor DLINK 0x2001 D-Link vendor PLANEX2 0x2019 Planex Communications vendor HAUPPAUGE2 0x2040 Hauppauge Computer Works vendor TLAYTECH 0x20b9 Tlay Tech vendor ENCORE 0x203d Encore vendor QIHARDWARE 0x20b7 QI-hardware vendor PARA 0x20b8 PARA Industrial vendor SIMTEC 0x20df Simtec Electronics vendor TRENDNET 0x20f4 TRENDnet vendor RTSYSTEMS 0x2100 RTSYSTEMS vendor VIALABS 0x2109 VIA Labs vendor ERICSSON 0x2282 Ericsson vendor MOTOROLA2 0x22b8 Motorola vendor WETELECOM 0x22de WeTelecom vendor TPLINK 0x2357 TP-Link vendor WESTMOUNTAIN 0x2405 West Mountain Radio vendor TRIPPLITE 0x2478 Tripp-Lite vendor HIROSE 0x2631 Hirose Electric vendor NHJ 0x2770 NHJ vendor PLANEX 0x2c02 Planex Communications vendor VIDZMEDIA 0x3275 VidzMedia Pte Ltd vendor LINKINSTRUMENTS 0x3195 Link Instruments Inc. 
vendor AEI 0x3334 AEI vendor HANK 0x3353 Hank Connection vendor PQI 0x3538 PQI vendor DAISY 0x3579 Daisy Technology vendor NI 0x3923 National Instruments vendor MICRONET 0x3980 Micronet Communications vendor IODATA2 0x40bb I-O Data vendor IRIVER 0x4102 iRiver vendor DELL 0x413c Dell vendor WCH 0x4348 QinHeng Electronics vendor ACEECA 0x4766 Aceeca vendor FEIXUN 0x4855 FeiXun Communication vendor PAPOUCH 0x5050 Papouch products vendor AVERATEC 0x50c2 Averatec vendor SWEEX 0x5173 Sweex vendor PROLIFIC2 0x5372 Prolific Technologies vendor ONSPEC2 0x55aa OnSpec Electronic Inc. vendor ZINWELL 0x5a57 Zinwell vendor SITECOM 0x6189 Sitecom vendor ARKMICRO 0x6547 Arkmicro Technologies Inc. vendor 3COM2 0x6891 3Com vendor EDIMAX 0x7392 Edimax vendor INTEL 0x8086 Intel vendor INTEL2 0x8087 Intel vendor ALLWIN 0x8516 ALLWIN Tech vendor SITECOM2 0x9016 Sitecom vendor MOSCHIP 0x9710 MosChip Semiconductor vendor NETGEAR4 0x9846 Netgear vendor MARVELL 0x9e88 Marvell Technology Group Ltd. vendor 3COM3 0xa727 3Com vendor CACE 0xcace CACE Technologies vendor EVOLUTION 0xdeee Evolution Robotics products vendor DATAAPEX 0xdaae DataApex vendor HP2 0xf003 Hewlett Packard vendor LOGILINK 0xfc08 LogiLink vendor USRP 0xfffe GNU Radio USRP
/*
 * List of known products. Grouped by vendor.
 */
/* 3Com products */ product 3COM HOMECONN 0x009d HomeConnect Camera product 3COM 3CREB96 0x00a0 Bluetooth USB Adapter product 3COM 3C19250 0x03e8 3C19250 Ethernet Adapter product 3COM 3CRSHEW696 0x0a01 3CRSHEW696 Wireless Adapter product 3COM 3C460 0x11f8 HomeConnect 3C460 product 3COM USR56K 0x3021 U.S. Robotics 56000 Voice FaxModem Pro product 3COM 3C460B 0x4601 HomeConnect 3C460B product 3COM2 3CRUSB10075 0xa727 3CRUSB10075 product 3COM3 AR5523_1 0x6893 AR5523 product 3COM3 AR5523_2 0x6895 AR5523 product 3COM3 AR5523_3 0x6897 AR5523 product 3COMUSR OFFICECONN 0x0082 3Com OfficeConnect Analog Modem product 3COMUSR USRISDN 0x008f 3Com U.S. Robotics Pro ISDN TA product 3COMUSR HOMECONN 0x009d 3Com HomeConnect Camera product 3COMUSR USR56K 0x3021 U.S. Robotics 56000 Voice FaxModem Pro /* AboCom products */ product ABOCOM XX1 0x110c XX1 product ABOCOM XX2 0x200c XX2 product ABOCOM RT2770 0x2770 RT2770 product ABOCOM RT2870 0x2870 RT2870 product ABOCOM RT3070 0x3070 RT3070 product ABOCOM RT3071 0x3071 RT3071 product ABOCOM RT3072 0x3072 RT3072 product ABOCOM2 RT2870_1 0x3c09 RT2870 product ABOCOM URE450 0x4000 URE450 Ethernet Adapter product ABOCOM UFE1000 0x4002 UFE1000 Fast Ethernet Adapter product ABOCOM DSB650TX_PNA 0x4003 1/10/100 Ethernet Adapter product ABOCOM XX4 0x4004 XX4 product ABOCOM XX5 0x4007 XX5 product ABOCOM XX6 0x400b XX6 product ABOCOM XX7 0x400c XX7 product ABOCOM RTL8151 0x401a RTL8151 product ABOCOM XX8 0x4102 XX8 product ABOCOM XX9 0x4104 XX9 product ABOCOM UF200 0x420a UF200 Ethernet product ABOCOM WL54 0x6001 WL54 product ABOCOM XX10 0xabc1 XX10 product ABOCOM BWU613 0xb000 BWU613 product ABOCOM HWU54DM 0xb21b HWU54DM product ABOCOM RT2573_2 0xb21c RT2573 product ABOCOM RT2573_3 0xb21d RT2573 product ABOCOM RT2573_4 0xb21e RT2573 product ABOCOM RTL8188CU_1 0x8188 RTL8188CU product ABOCOM RTL8188CU_2 0x8189 RTL8188CU product ABOCOM RTL8192CU 0x8178 RTL8192CU product ABOCOM RTL8188EU 0x8179 RTL8188EU product ABOCOM WUG2700 0xb21f WUG2700
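The two kinds of entries above follow the usbdevs format: a "vendor NAME id description" line registers a USB vendor ID, and a "product VENDOR NAME id description" line registers a product ID scoped to that vendor; each pair becomes a USB_VENDOR_* and USB_PRODUCT_*_* constant in a generated header that drivers match against. The C sketch below is only a minimal illustration of that consumer side, assuming the generated usbdevs.h and the FTDI entries that appear later in this list; example_devs and example_probe are hypothetical names, not code from this change.

/*
 * Minimal sketch: matching a device against IDs taken from this list.
 * "vendor FTDI 0x0403" plus "product FTDI SERIAL_8U232AM 0x6001" yield
 * USB_VENDOR_FTDI and USB_PRODUCT_FTDI_SERIAL_8U232AM in the generated
 * usbdevs.h; example_devs and example_probe are hypothetical names.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"

/* Table of vendor/product pairs this hypothetical driver supports. */
static const STRUCT_USB_HOST_ID example_devs[] = {
	{ USB_VP(USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_8U232AM) },
	{ USB_VP(USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_2232C) },
};

static int
example_probe(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);

	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	/* Returns 0 on a vendor/product match, ENXIO otherwise. */
	return (usbd_lookup_id_by_uaa(example_devs,
	    sizeof(example_devs), uaa));
}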
/* Acton Research Corp. */ product ACTON SPECTRAPRO 0x0100 FTDI compatible adapter /* Accton products */ product ACCTON USB320_EC 0x1046 USB320-EC Ethernet Adapter product ACCTON 2664W 0x3501 2664W product ACCTON 111 0x3503 T-Sinus 111 Wireless Adapter product ACCTON SMCWUSBG_NF 0x4505 SMCWUSB-G (no firmware) product ACCTON SMCWUSBG 0x4506 SMCWUSB-G product ACCTON SMCWUSBTG2_NF 0x4507 SMCWUSBT-G2 (no firmware) product ACCTON SMCWUSBTG2 0x4508 SMCWUSBT-G2 product ACCTON PRISM_GT 0x4521 PrismGT USB 2.0 WLAN product ACCTON SS1001 0x5046 SpeedStream Ethernet Adapter product ACCTON RT2870_2 0x6618 RT2870 product ACCTON RT3070 0x7511 RT3070 product ACCTON RT2770 0x7512 RT2770 product ACCTON RT2870_3 0x7522 RT2870 product ACCTON RT2870_5 0x8522 RT2870 product ACCTON RT3070_4 0xa512 RT3070 product ACCTON RT2870_4 0xa618 RT2870 product ACCTON RT3070_1 0xa701 RT3070 product ACCTON RT3070_2 0xa702 RT3070 product ACCTON RT2870_1 0xb522 RT2870 product ACCTON RT3070_3 0xc522 RT3070 product ACCTON RT3070_5 0xd522 RT3070 product ACCTON RTL8192SU 0xc512 RTL8192SU product ACCTON ZD1211B 0xe501 ZD1211B product ACCTON WN7512 0xf522 WN7512 /* Aceeca products */ product ACEECA MEZ1000 0x0001 MEZ1000 RDA /* Acer Communications & Multimedia (OEMed by Surecom) */ product ACERCM EP1427X2 0x0893 EP-1427X-2 Ethernet Adapter /* Acer Labs products */ product ACERLABS M5632 0x5632 USB 2.0 Data Link /* Acer Peripherals, Inc. products */ product ACERP ACERSCAN_C310U 0x12a6 Acerscan C310U product ACERP ACERSCAN_320U 0x2022 Acerscan 320U product ACERP ACERSCAN_640U 0x2040 Acerscan 640U product ACERP ACERSCAN_620U 0x2060 Acerscan 620U product ACERP ACERSCAN_4300U 0x20b0 Benq 3300U/4300U product ACERP ACERSCAN_640BT 0x20be Acerscan 640BT product ACERP ACERSCAN_1240U 0x20c0 Acerscan 1240U product ACERP S81 0x4027 BenQ S81 phone product ACERP H10 0x4068 AWL400 Wireless Adapter product ACERP ATAPI 0x6003 ATA/ATAPI Adapter product ACERP AWL300 0x9000 AWL300 Wireless Adapter product ACERP AWL400 0x9001 AWL400 Wireless Adapter /* Acer Warp products */ product ACERW WARPLINK 0x0204 Warplink /* Actions products */ product ACTIONS MP4 0x1101 Actions MP4 Player /* Actiontec, Inc. products */ product ACTIONTEC PRISM_25 0x0408 Prism2.5 Wireless Adapter product ACTIONTEC PRISM_25A 0x0421 Prism2.5 Wireless Adapter A product ACTIONTEC FREELAN 0x6106 ROPEX FreeLan 802.11b product ACTIONTEC UAT1 0x7605 UAT1 Wireless Ethernet Adapter /* ACTiSYS products */ product ACTISYS IR2000U 0x0011 ACT-IR2000U FIR /* ActiveWire, Inc. products */ product ACTIVEWIRE IOBOARD 0x0100 I/O Board product ACTIVEWIRE IOBOARD_FW1 0x0101 I/O Board, rev.
1 firmware /* Adaptec products */ product ADAPTEC AWN8020 0x0020 AWN-8020 WLAN /* Addtron products */ product ADDTRON AWU120 0xff31 AWU-120 /* ADLINK Technology products */ product ADLINK ND6530 0x6530 ND-6530 USB-Serial /* ADMtek products */ product ADMTEK PEGASUSII_4 0x07c2 AN986A Ethernet product ADMTEK PEGASUS 0x0986 AN986 Ethernet product ADMTEK PEGASUSII 0x8511 AN8511 Ethernet product ADMTEK PEGASUSII_2 0x8513 AN8513 Ethernet product ADMTEK PEGASUSII_3 0x8515 AN8515 Ethernet /* ADDON products */ /* PNY OEMs these */ product ADDON ATTACHE 0x1300 USB 2.0 Flash Drive product ADDON A256MB 0x1400 Attache 256MB USB 2.0 Flash Drive product ADDON DISKPRO512 0x1420 USB 2.0 Flash Drive (DANE-ELEC zMate 512MB USB flash drive) /* Addonics products */ product ADDONICS2 CABLE_205 0xa001 Cable 205 /* ADS products */ product ADS UBS10BT 0x0008 UBS-10BT Ethernet product ADS UBS10BTX 0x0009 UBS-10BT Ethernet /* AEI products */ product AEI FASTETHERNET 0x1701 Fast Ethernet /* Afatech Technologies, Inc. */ product AFATECH AFATECH1336 0x1336 Flash Card Reader /* Agate Technologies products */ product AGATE QDRIVE 0x0378 Q-Drive /* AGFA products */ product AGFA SNAPSCAN1212U 0x0001 SnapScan 1212U product AGFA SNAPSCAN1236U 0x0002 SnapScan 1236U product AGFA SNAPSCANTOUCH 0x0100 SnapScan Touch product AGFA SNAPSCAN1212U2 0x2061 SnapScan 1212U product AGFA SNAPSCANE40 0x208d SnapScan e40 product AGFA SNAPSCANE50 0x208f SnapScan e50 product AGFA SNAPSCANE20 0x2091 SnapScan e20 product AGFA SNAPSCANE25 0x2095 SnapScan e25 product AGFA SNAPSCANE26 0x2097 SnapScan e26 product AGFA SNAPSCANE52 0x20fd SnapScan e52 /* Ain Communication Technology products */ product AINCOMM AWU2000B 0x1001 AWU2000B Wireless Adapter /* AIPTEK products */ product AIPTEK POCKETCAM3M 0x2011 PocketCAM 3Mega product AIPTEK2 PENCAM_MEGA_1_3 0x504a PenCam Mega 1.3 product AIPTEK2 SUNPLUS_TECH 0x0c15 Sunplus Technology Inc. /* AirPlus products */ product AIRPLUS MCD650 0x3198 MCD650 modem /* AirPrime products */ product AIRPRIME PC5220 0x0112 CDMA Wireless PC Card product AIRPRIME USB308 0x68A3 USB308 HSPA+ USB Modem product AIRPRIME AC313U 0x68aa Sierra Wireless AirCard 313U /* AirTies products */ product AIRTIES RT3070 0x2310 RT3070 /* AKS products */ product AKS USBHASP 0x0001 USB-HASP 0.06 /* Alcatel products */ product ALCATEL OT535 0x02df One Touch 535/735 /* Alcor Micro, Inc.
products */ product ALCOR2 KBD_HUB 0x2802 Kbd Hub product ALCOR DUMMY 0x0000 Dummy product product ALCOR SDCR_6335 0x6335 SD/MMC Card Reader product ALCOR SDCR_6362 0x6362 SD/MMC Card Reader product ALCOR SDCR_6366 0x6366 SD/MMC Card Reader product ALCOR TRANSCEND 0x6387 Transcend JetFlash Drive product ALCOR MA_KBD_HUB 0x9213 MacAlly Kbd Hub product ALCOR AU9814 0x9215 AU9814 Hub product ALCOR UMCR_9361 0x9361 USB Multimedia Card Reader product ALCOR SM_KBD 0x9410 MicroConnectors/StrongMan Keyboard product ALCOR NEC_KBD_HUB 0x9472 NEC Kbd Hub product ALCOR AU9720 0x9720 USB2 - RS-232 product ALCOR AU6390 0x6390 AU6390 USB-IDE converter /* Alink products */ product ALINK DWM652U5 0xce16 DWM-652 product ALINK 3G 0x9000 3G modem product ALINK 3GU 0x9200 3G modem /* Altec Lansing products */ product ALTEC ADA70 0x0070 ADA70 Speakers product ALTEC ASC495 0xff05 ASC495 Speakers /* Alti-2 products */ product ALTI2 N3 0x6001 FTDI compatible adapter /* Allied Telesyn International products */ product ALLIEDTELESYN ATUSB100 0xb100 AT-USB100 /* ALLWIN Tech products */ product ALLWIN RT2070 0x2070 RT2070 product ALLWIN RT2770 0x2770 RT2770 product ALLWIN RT2870 0x2870 RT2870 product ALLWIN RT3070 0x3070 RT3070 product ALLWIN RT3071 0x3071 RT3071 product ALLWIN RT3072 0x3072 RT3072 product ALLWIN RT3572 0x3572 RT3572 /* AlphaSmart, Inc. products */ product ALPHASMART DANA_KB 0xdbac AlphaSmart Dana Keyboard product ALPHASMART DANA_SYNC 0xdf00 AlphaSmart Dana HotSync /* Amoi products */ product AMOI H01 0x0800 H01 3G modem product AMOI H01A 0x7002 H01A 3G modem product AMOI H02 0x0802 H02 3G modem /* American Power Conversion products */ product APC UPS 0x0002 Uninterruptible Power Supply /* Ambit Microsystems products */ product AMBIT WLAN 0x0302 WLAN product AMBIT NTL_250 0x6098 NTL 250 cable modem /* Apacer products */ product APACER HT202 0xb113 USB 2.0 Flash Drive /* Amigo Technology products */ product AMIGO RT2870_1 0x9031 RT2870 product AMIGO RT2870_2 0x9041 RT2870 /* AMIT products */ product AMIT CGWLUSB2GO 0x0002 CG-WLUSB2GO product AMIT CGWLUSB2GNR 0x0008 CG-WLUSB2GNR product AMIT RT2870_1 0x0012 RT2870 /* AMIT(2) products */ product AMIT2 RT2870 0x0008 RT2870 /* Analog Devices products */ product ANALOGDEVICES GNICE 0xf000 FTDI compatible adapter product ANALOGDEVICES GNICEPLUS 0xf001 FTDI compatible adapter /* Anchor products */ product ANCHOR SERIAL 0x2008 Serial product ANCHOR EZUSB 0x2131 EZUSB product ANCHOR EZLINK 0x2720 EZLINK /* AnyData products */ product ANYDATA ADU_620UW 0x6202 CDMA 2000 EV-DO USB Modem product ANYDATA ADU_E100X 0x6501 CDMA 2000 1xRTT/EV-DO USB Modem product ANYDATA ADU_500A 0x6502 CDMA 2000 EV-DO USB Modem /* AOX, Inc.
products */ product AOX USB101 0x0008 Ethernet /* Apple Computer products */ product APPLE DUMMY 0x0000 Dummy product product APPLE IMAC_KBD 0x0201 USB iMac Keyboard product APPLE KBD 0x0202 USB Keyboard M2452 product APPLE EXT_KBD 0x020c Apple Extended USB Keyboard /* MacbookAir, aka wellspring */ product APPLE WELLSPRING_ANSI 0x0223 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING_ISO 0x0224 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING_JIS 0x0225 Apple Internal Keyboard/Trackpad /* MacbookProPenryn, aka wellspring2 */ product APPLE WELLSPRING2_ANSI 0x0230 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING2_ISO 0x0231 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING2_JIS 0x0232 Apple Internal Keyboard/Trackpad /* Macbook5,1 (unibody), aka wellspring3 */ product APPLE WELLSPRING3_ANSI 0x0236 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING3_ISO 0x0237 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING3_JIS 0x0238 Apple Internal Keyboard/Trackpad /* MacbookAir3,2 (unibody), aka wellspring4 */ product APPLE WELLSPRING4_ANSI 0x023f Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4_ISO 0x0240 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4_JIS 0x0241 Apple Internal Keyboard/Trackpad /* MacbookAir3,1 (unibody), aka wellspring4 */ product APPLE WELLSPRING4A_ANSI 0x0242 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4A_ISO 0x0243 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING4A_JIS 0x0244 Apple Internal Keyboard/Trackpad /* Macbook8 (unibody, March 2011) */ product APPLE WELLSPRING5_ANSI 0x0245 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5_ISO 0x0246 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5_JIS 0x0247 Apple Internal Keyboard/Trackpad /* MacbookAir4,1 (unibody, July 2011) */ product APPLE WELLSPRING6A_ANSI 0x0249 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6A_ISO 0x024a Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6A_JIS 0x024b Apple Internal Keyboard/Trackpad /* MacbookAir4,2 (unibody, July 2011) */ product APPLE WELLSPRING6_ANSI 0x024c Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6_ISO 0x024d Apple Internal Keyboard/Trackpad product APPLE WELLSPRING6_JIS 0x024e Apple Internal Keyboard/Trackpad /* Macbook8,2 (unibody) */ product APPLE WELLSPRING5A_ANSI 0x0252 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5A_ISO 0x0253 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING5A_JIS 0x0254 Apple Internal Keyboard/Trackpad /* MacbookPro10,1 (unibody, June 2012) */ product APPLE WELLSPRING7_ANSI 0x0262 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7_ISO 0x0263 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7_JIS 0x0264 Apple Internal Keyboard/Trackpad /* MacbookPro10,2 (unibody, October 2012) */ product APPLE WELLSPRING7A_ANSI 0x0259 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7A_ISO 0x025a Apple Internal Keyboard/Trackpad product APPLE WELLSPRING7A_JIS 0x025b Apple Internal Keyboard/Trackpad /* MacbookAir6,2 (unibody, June 2013) */ product APPLE WELLSPRING8_ANSI 0x0290 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING8_ISO 0x0291 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING8_JIS 0x0292 Apple Internal Keyboard/Trackpad /* MacbookPro12,1 */ product APPLE WELLSPRING9_ANSI 0x0272 Apple Internal Keyboard/Trackpad product APPLE WELLSPRING9_ISO 0x0273 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING9_JIS 0x0274 Apple Internal Keyboard/Trackpad product APPLE MOUSE 0x0301 Mouse M4848 product APPLE OPTMOUSE 0x0302 Optical mouse product APPLE MIGHTYMOUSE 0x0304 Mighty Mouse product APPLE KBD_HUB 0x1001 Hub in Apple USB Keyboard product APPLE EXT_KBD_HUB 0x1003 Hub in Apple Extended USB Keyboard product APPLE SPEAKERS 0x1101 Speakers product APPLE IPOD 0x1201 iPod product APPLE IPOD2G 0x1202 iPod 2G product APPLE IPOD3G 0x1203 iPod 3G product APPLE IPOD_04 0x1204 iPod '04' product APPLE IPODMINI 0x1205 iPod Mini product APPLE IPOD_06 0x1206 iPod '06' product APPLE IPOD_07 0x1207 iPod '07' product APPLE IPOD_08 0x1208 iPod '08' product APPLE IPODVIDEO 0x1209 iPod Video product APPLE IPODNANO 0x120a iPod Nano product APPLE IPHONE 0x1290 iPhone product APPLE IPOD_TOUCH 0x1291 iPod Touch product APPLE IPHONE_3G 0x1292 iPhone 3G product APPLE IPHONE_3GS 0x1294 iPhone 3GS product APPLE IPHONE_4 0x1297 iPhone 4 product APPLE IPHONE_4S 0x12a0 iPhone 4S product APPLE IPHONE_5 0x12a8 iPhone 5 product APPLE IPAD 0x129a iPad product APPLE ETHERNET 0x1402 Ethernet A1277 /* Arkmicro Technologies */ product ARKMICRO ARK3116 0x0232 ARK3116 Serial /* Asahi Optical products */ product ASAHIOPTICAL OPTIO230 0x0004 Digital camera product ASAHIOPTICAL OPTIO330 0x0006 Digital camera /* Asante products */ product ASANTE EA 0x1427 Ethernet /* ASIX Electronics products */ product ASIX AX88172 0x1720 10/100 Ethernet product ASIX AX88178 0x1780 AX88178 product ASIX AX88178A 0x178a AX88178A USB 2.0 10/100/1000 Ethernet product ASIX AX88179 0x1790 AX88179 USB 3.0 10/100/1000 Ethernet product ASIX AX88772 0x7720 AX88772 product ASIX AX88772A 0x772a AX88772A USB 2.0 10/100 Ethernet product ASIX AX88772B 0x772b AX88772B USB 2.0 10/100 Ethernet product ASIX AX88772B_1 0x7e2b AX88772B USB 2.0 10/100 Ethernet /* ASUS products */ product ASUS2 USBN11 0x0b05 USB-N11 product ASUS RT2570 0x1706 RT2500USB Wireless Adapter product ASUS WL167G 0x1707 WL-167g Wireless Adapter product ASUS WL159G 0x170c WL-159g product ASUS A9T_WIFI 0x171b A9T wireless product ASUS P5B_WIFI 0x171d P5B wireless product ASUS RT2573_1 0x1723 RT2573 product ASUS RT2573_2 0x1724 RT2573 product ASUS LCM 0x1726 LCM display product ASUS RT2870_1 0x1731 RT2870 product ASUS RT2870_2 0x1732 RT2870 product ASUS RT2870_3 0x1742 RT2870 product ASUS RT2870_4 0x1760 RT2870 product ASUS RT2870_5 0x1761 RT2870 product ASUS USBN13 0x1784 USB-N13 product ASUS USBN10 0x1786 USB-N10 product ASUS RT3070_1 0x1790 RT3070 product ASUS RTL8192SU 0x1791 RTL8192SU product ASUS USB_N53 0x179d ASUS Black Diamond Dual Band USB-N53 product ASUS RTL8192CU 0x17ab RTL8192CU product ASUS USBN66 0x17ad USB-N66 product ASUS USBN10NANO 0x17ba USB-N10 Nano product ASUS USBAC51 0x17d1 USB-AC51 product ASUS USBAC56 0x17d2 USB-AC56 product ASUS A730W 0x4202 ASUS MyPal A730W product ASUS P535 0x420f ASUS P535 PDA product ASUS GMSC 0x422f ASUS Generic Mass Storage /* ATen products */ product ATEN UC1284 0x2001 Parallel printer product ATEN UC10T 0x2002 10Mbps Ethernet product ATEN UC110T 0x2007 UC-110T Ethernet product ATEN UC232A 0x2008 Serial product ATEN UC210T 0x2009 UC-210T Ethernet product ATEN DSB650C 0x4000 DSB-650C /* ATP Electronics products */ product ATP EUSB 0xaf01 ATP IG eUSB SSD /* Atheros Communications products */ product ATHEROS AR5523 0x0001 AR5523 product ATHEROS AR5523_NF 0x0002 AR5523 (no firmware) product ATHEROS2 AR5523_1 0x0001 AR5523 product ATHEROS2 AR5523_1_NF 0x0002 AR5523 (no firmware) product ATHEROS2 AR5523_2 0x0003 AR5523 product 
ATHEROS2 AR5523_2_NF 0x0004 AR5523 (no firmware) product ATHEROS2 AR5523_3 0x0005 AR5523 product ATHEROS2 AR5523_3_NF 0x0006 AR5523 (no firmware) product ATHEROS2 TG121N 0x1001 TG121N product ATHEROS2 WN821NV2 0x1002 WN821NV2 product ATHEROS2 3CRUSBN275 0x1010 3CRUSBN275 product ATHEROS2 WN612 0x1011 WN612 product ATHEROS2 AR9170 0x9170 AR9170 /* Atmel Comp. products */ product ATMEL STK541 0x2109 Zigbee Controller product ATMEL UHB124 0x3301 AT43301 USB 1.1 Hub product ATMEL DWL120 0x7603 DWL-120 Wireless Adapter product ATMEL BW002 0x7605 BW002 Wireless Adapter product ATMEL WL1130USB 0x7613 WL-1130 USB product ATMEL AT76C505A 0x7614 AT76c505a Wireless Adapter /* AuthenTec products */ product AUTHENTEC AES1610 0x1600 AES1610 Fingerprint Sensor /* Avision products */ product AVISION 1200U 0x0268 1200U scanner /* AVM products */ product AVM FRITZWLAN 0x8401 FRITZ!WLAN N /* Axesstel products */ product AXESSTEL DATAMODEM 0x1000 Data Modem /* AzureWave products */ product AZUREWAVE RT2870_1 0x3247 RT2870 product AZUREWAVE RT2870_2 0x3262 RT2870 product AZUREWAVE RT3070_1 0x3273 RT3070 product AZUREWAVE RT3070_2 0x3284 RT3070 product AZUREWAVE RT3070_3 0x3305 RT3070 product AZUREWAVE RTL8188CU 0x3357 RTL8188CU product AZUREWAVE RTL8188CE_1 0x3358 RTL8188CE product AZUREWAVE RTL8188CE_2 0x3359 RTL8188CE product AZUREWAVE RTL8192SU_1 0x3306 RTL8192SU product AZUREWAVE RTL8192SU_2 0x3309 RTL8192SU product AZUREWAVE RTL8192SU_3 0x3310 RTL8192SU product AZUREWAVE RTL8192SU_4 0x3311 RTL8192SU product AZUREWAVE RTL8192SU_5 0x3325 RTL8192SU /* Baltech products */ product BALTECH CARDREADER 0x9999 Card reader /* Bayer products */ product BAYER CONTOUR_CABLE 0x6001 FTDI compatible adapter /* B&B Electronics products */ product BBELECTRONICS USOTL4 0xAC01 RS-422/485 product BBELECTRONICS 232USB9M 0xac27 FTDI compatible adapter product BBELECTRONICS 485USB9F_2W 0xac25 FTDI compatible adapter product BBELECTRONICS 485USB9F_4W 0xac26 FTDI compatible adapter product BBELECTRONICS 485USBTB_2W 0xac33 FTDI compatible adapter product BBELECTRONICS 485USBTB_4W 0xac34 FTDI compatible adapter product BBELECTRONICS TTL3USB9M 0xac50 FTDI compatible adapter product BBELECTRONICS TTL5USB9M 0xac49 FTDI compatible adapter product BBELECTRONICS USO9ML2 0xac03 FTDI compatible adapter product BBELECTRONICS USO9ML2DR 0xac17 FTDI compatible adapter product BBELECTRONICS USO9ML2DR_2 0xac16 FTDI compatible adapter product BBELECTRONICS USOPTL4 0xac11 FTDI compatible adapter product BBELECTRONICS USOPTL4DR 0xac19 FTDI compatible adapter product BBELECTRONICS USOPTL4DR2 0xac18 FTDI compatible adapter product BBELECTRONICS USPTL4 0xac12 FTDI compatible adapter product BBELECTRONICS USTL4 0xac02 FTDI compatible adapter product BBELECTRONICS ZZ_PROG1_USB 0xba02 FTDI compatible adapter /* Belkin products */ /*product BELKIN F5U111 0x????
F5U111 Ethernet*/ product BELKIN F5D6050 0x0050 F5D6050 802.11b Wireless Adapter product BELKIN FBT001V 0x0081 FBT001v2 Bluetooth product BELKIN FBT003V 0x0084 FBT003v2 Bluetooth product BELKIN F5U103 0x0103 F5U103 Serial product BELKIN F5U109 0x0109 F5U109 Serial product BELKIN USB2SCSI 0x0115 USB to SCSI product BELKIN F8T012 0x0121 F8T012xx1 Bluetooth USB Adapter product BELKIN USB2LAN 0x0121 USB to LAN product BELKIN F5U208 0x0208 F5U208 VideoBus II product BELKIN F5U237 0x0237 F5U237 USB 2.0 7-Port Hub product BELKIN F5U257 0x0257 F5U257 Serial product BELKIN F5U409 0x0409 F5U409 Serial product BELKIN F6C550AVR 0x0551 F6C550-AVR UPS product BELKIN F5U120 0x1203 F5U120-PC Hub product BELKIN RTL8188CU 0x1102 RTL8188CU Wireless Adapter product BELKIN F9L1103 0x1103 F9L1103 Wireless Adapter product BELKIN RTL8192CU 0x2102 RTL8192CU Wireless Adapter product BELKIN F7D2102 0x2103 F7D2102 Wireless Adapter product BELKIN F5U258 0x258A F5U258 Host to Host cable product BELKIN ZD1211B 0x4050 ZD1211B product BELKIN F5D5055 0x5055 F5D5055 product BELKIN F5D7050 0x7050 F5D7050 Wireless Adapter product BELKIN F5D7051 0x7051 F5D7051 54g USB Network Adapter product BELKIN F5D7050A 0x705a F5D7050A Wireless Adapter /* Also sold as 'Ativa 802.11g wireless card' */ product BELKIN F5D7050_V4000 0x705c F5D7050 v4000 Wireless Adapter product BELKIN F5D7050E 0x705e F5D7050E Wireless Adapter product BELKIN RT2870_1 0x8053 RT2870 product BELKIN RT2870_2 0x805c RT2870 product BELKIN F5D8053V3 0x815c F5D8053 v3 product BELKIN RTL8192SU_1 0x815f RTL8192SU product BELKIN RTL8192SU_2 0x845a RTL8192SU product BELKIN RTL8192SU_3 0x945a RTL8192SU product BELKIN F5D8055 0x825a F5D8055 product BELKIN F5D8055V2 0x825b F5D8055 v2 product BELKIN F5D9050V3 0x905b F5D9050 ver 3 Wireless Adapter product BELKIN2 F5U002 0x0002 F5U002 Parallel printer product BELKIN F6D4050V1 0x935a F6D4050 v1 product BELKIN F6D4050V2 0x935b F6D4050 v2 /* Billionton products */ product BILLIONTON USB100 0x0986 USB100N 10/100 FastEthernet product BILLIONTON USBLP100 0x0987 USB100LP product BILLIONTON USBEL100 0x0988 USB100EL product BILLIONTON USBE100 0x8511 USBE100 product BILLIONTON USB2AR 0x90ff USB2AR Ethernet /* Broadcom products */ product BROADCOM BCM2033 0x2033 BCM2033 Bluetooth USB dongle /* Brother Industries products */ product BROTHER HL1050 0x0002 HL-1050 laser printer product BROTHER MFC8600_9650 0x0100 MFC8600/9650 multifunction device /* Behavior Technology Computer products */ product BTC BTC6100 0x5550 6100C Keyboard product BTC BTC7932 0x6782 Keyboard with mouse port /* CACE Technologies products */ product CACE AIRPCAPNX 0x0300 AirPcap NX /* Canon, Inc. 
products */ product CANON N656U 0x2206 CanoScan N656U product CANON N1220U 0x2207 CanoScan N1220U product CANON D660U 0x2208 CanoScan D660U product CANON N676U 0x220d CanoScan N676U product CANON N1240U 0x220e CanoScan N1240U product CANON LIDE25 0x2220 CanoScan LIDE 25 product CANON S10 0x3041 PowerShot S10 product CANON S100 0x3045 PowerShot S100 product CANON S200 0x3065 PowerShot S200 product CANON REBELXT 0x30ef Digital Rebel XT /* CATC products */ product CATC NETMATE 0x000a Netmate Ethernet product CATC NETMATE2 0x000c Netmate2 Ethernet product CATC CHIEF 0x000d USB Chief Bus & Protocol Analyzer product CATC ANDROMEDA 0x1237 Andromeda hub /* CASIO products */ product CASIO QV_DIGICAM 0x1001 QV DigiCam product CASIO EXS880 0x1105 Exilim EX-S880 product CASIO BE300 0x2002 BE-300 PDA product CASIO NAMELAND 0x4001 CASIO Nameland EZ-USB /* CCYU products */ product CCYU ED1064 0x2136 EasyDisk ED1064 /* Century products */ product CENTURY EX35QUAT 0x011e Century USB Disk Enclosure product CENTURY EX35SW4_SB4 0x011f Century USB Disk Enclosure /* Cherry products */ product CHERRY MY3000KBD 0x0001 My3000 keyboard product CHERRY MY3000HUB 0x0003 My3000 hub product CHERRY CYBOARD 0x0004 CyBoard Keyboard /* Chic Technology products */ product CHIC MOUSE1 0x0001 mouse product CHIC CYPRESS 0x0003 Cypress USB Mouse /* Chicony products */ product CHICONY KB8933 0x0001 KB-8933 keyboard product CHICONY KU0325 0x0116 KU-0325 keyboard product CHICONY CNF7129 0xb071 Notebook Web Camera product CHICONY HDUVCCAM 0xb40a HD UVC WebCam product CHICONY RTL8188CUS_1 0xaff7 RTL8188CUS product CHICONY RTL8188CUS_2 0xaff8 RTL8188CUS product CHICONY RTL8188CUS_3 0xaff9 RTL8188CUS product CHICONY RTL8188CUS_4 0xaffa RTL8188CUS product CHICONY RTL8188CUS_5 0xaffb RTL8188CUS product CHICONY2 TWINKLECAM 0x600d TwinkleCam USB camera /* CH Products */ product CHPRODUCTS PROTHROTTLE 0x00f1 Pro Throttle product CHPRODUCTS PROPEDALS 0x00f2 Pro Pedals product CHPRODUCTS FIGHTERSTICK 0x00f3 Fighterstick product CHPRODUCTS FLIGHTYOKE 0x00ff Flight Sim Yoke /* Cisco-Linksys products */ product CISCOLINKSYS WUSB54AG 0x000c WUSB54AG Wireless Adapter product CISCOLINKSYS WUSB54G 0x000d WUSB54G Wireless Adapter product CISCOLINKSYS WUSB54GP 0x0011 WUSB54GP Wireless Adapter product CISCOLINKSYS USB200MV2 0x0018 USB200M v2 product CISCOLINKSYS HU200TS 0x001a HU200TS Wireless Adapter product CISCOLINKSYS WUSB54GC 0x0020 WUSB54GC product CISCOLINKSYS WUSB54GR 0x0023 WUSB54GR product CISCOLINKSYS WUSBF54G 0x0024 WUSBF54G product CISCOLINKSYS AE1000 0x002f AE1000 product CISCOLINKSYS WUSB6300 0x003f WUSB6300 product CISCOLINKSYS USB3GIGV1 0x0041 USB3GIGV1 USB Ethernet Adapter product CISCOLINKSYS2 RT3070 0x4001 RT3070 product CISCOLINKSYS3 RT3070 0x0101 RT3070 /* Clipsal products */ product CLIPSAL 560884 0x0101 560884 C-Bus Audio Matrix Switch product CLIPSAL 5500PACA 0x0201 5500PACA C-Bus Pascal Automation Controller product CLIPSAL 5800PC 0x0301 5800PC C-Bus Wireless Interface product CLIPSAL 5500PCU 0x0303 5500PCU C-Bus Interface product CLIPSAL 5000CT2 0x0304 5000CT2 C-Bus Touch Screen product CLIPSAL C5000CT2 0x0305 C5000CT2 C-Bus Touch Screen product CLIPSAL L51xx 0x0401 L51xx C-Bus Dimmer /* C-Media products */ product CMEDIA CM6206 0x0102 CM106 compatible sound device /* CMOTECH products */ product CMOTECH CNU510 0x5141 CDMA Technologies USB modem product CMOTECH CNU550 0x5543 CDMA 2000 1xRTT/1xEVDO USB modem product CMOTECH CGU628 0x6006 CGU-628 product CMOTECH CDMA_MODEM1 0x6280 CDMA Technologies USB modem product CMOTECH DISK
0xf000 disk mode /* Compaq products */ product COMPAQ IPAQPOCKETPC 0x0003 iPAQ PocketPC product COMPAQ PJB100 0x504a Personal Jukebox PJB100 product COMPAQ IPAQLINUX 0x505a iPAQ Linux /* Composite Corp products (look the same as "TANGTOP") */ product COMPOSITE USBPS2 0x0001 USB to PS2 Adaptor /* Conceptronic products */ product CONCEPTRONIC PRISM_GT 0x3762 PrismGT USB 2.0 WLAN product CONCEPTRONIC C11U 0x7100 C11U product CONCEPTRONIC WL210 0x7110 WL-210 product CONCEPTRONIC AR5523_1 0x7801 AR5523 product CONCEPTRONIC AR5523_1_NF 0x7802 AR5523 (no firmware) product CONCEPTRONIC AR5523_2 0x7811 AR5523 product CONCEPTRONIC AR5523_2_NF 0x7812 AR5523 (no firmware) product CONCEPTRONIC2 RTL8192SU_1 0x3300 RTL8192SU product CONCEPTRONIC2 RTL8192SU_2 0x3301 RTL8192SU product CONCEPTRONIC2 RTL8192SU_3 0x3302 RTL8192SU product CONCEPTRONIC2 C54RU 0x3c02 C54RU WLAN product CONCEPTRONIC2 C54RU2 0x3c22 C54RU product CONCEPTRONIC2 RT3070_1 0x3c08 RT3070 product CONCEPTRONIC2 RT3070_2 0x3c11 RT3070 product CONCEPTRONIC2 VIGORN61 0x3c25 VIGORN61 product CONCEPTRONIC2 RT2870_1 0x3c06 RT2870 product CONCEPTRONIC2 RT2870_2 0x3c07 RT2870 product CONCEPTRONIC2 RT2870_7 0x3c09 RT2870 product CONCEPTRONIC2 RT2870_8 0x3c12 RT2870 product CONCEPTRONIC2 RT2870_3 0x3c23 RT2870 product CONCEPTRONIC2 RT2870_4 0x3c25 RT2870 product CONCEPTRONIC2 RT2870_5 0x3c27 RT2870 product CONCEPTRONIC2 RT2870_6 0x3c28 RT2870 /* Connectix products */ product CONNECTIX QUICKCAM 0x0001 QuickCam /* Contec products */ product CONTEC COM1USBH 0x8311 FTDI compatible adapter /* Corega products */ product COREGA ETHER_USB_T 0x0001 Ether USB-T product COREGA FETHER_USB_TX 0x0004 FEther USB-TX product COREGA WLAN_USB_USB_11 0x000c WirelessLAN USB-11 product COREGA FETHER_USB_TXS 0x000d FEther USB-TXS product COREGA WLANUSB 0x0012 Wireless LAN Stick-11 product COREGA FETHER_USB2_TX 0x0017 FEther USB2-TX product COREGA WLUSB_11_KEY 0x001a ULUSB-11 Key product COREGA CGUSBRS232R 0x002a CG-USBRS232R product COREGA CGWLUSB2GL 0x002d CG-WLUSB2GL product COREGA CGWLUSB2GPX 0x002e CG-WLUSB2GPX product COREGA RT2870_1 0x002f RT2870 product COREGA RT2870_2 0x003c RT2870 product COREGA RT2870_3 0x003f RT2870 product COREGA RT3070 0x0041 RT3070 product COREGA CGWLUSB300GNM 0x0042 CG-WLUSB300GNM product COREGA RTL8192SU 0x0047 RTL8192SU product COREGA RTL8192CU 0x0056 RTL8192CU product COREGA WLUSB_11_STICK 0x7613 WLAN USB Stick 11 product COREGA FETHER_USB_TXC 0x9601 FEther USB-TXC /* Corsair products */ product CORSAIR K60 0x0a60 Corsair Vengeance K60 keyboard product CORSAIR K70 0x1b09 Corsair Vengeance K70 keyboard product CORSAIR STRAFE 0x1b15 Corsair STRAFE Gaming keyboard /* Creative products */ product CREATIVE NOMAD_II 0x1002 Nomad II MP3 player product CREATIVE NOMAD_IIMG 0x4004 Nomad II MG product CREATIVE NOMAD 0x4106 Nomad product CREATIVE2 VOIP_BLASTER 0x0258 Voip Blaster product CREATIVE3 OPTICAL_MOUSE 0x0001 Notebook Optical Mouse /* Cambridge Silicon Radio Ltd.
products */ product CSR BT_DONGLE 0x0001 Bluetooth USB dongle product CSR CSRDFU 0xffff USB Bluetooth Device in DFU State /* Chipsbank Microelectronics Co., Ltd */ product CHIPSBANK USBMEMSTICK 0x6025 CBM2080 Flash drive controller product CHIPSBANK USBMEMSTICK1 0x6026 CBM1180 Flash drive controller /* CTX products */ product CTX EX1300 0x9999 Ex1300 hub /* Curitel products */ product CURITEL HX550C 0x1101 CDMA 2000 1xRTT USB modem (HX-550C) product CURITEL HX57XB 0x2101 CDMA 2000 1xRTT USB modem (HX-570/575B/PR-600) product CURITEL PC5740 0x3701 Broadband Wireless modem product CURITEL UM150 0x3711 EVDO modem product CURITEL UM175 0x3714 EVDO modem /* CyberPower products */ product CYBERPOWER 1500CAVRLCD 0x0501 1500CAVRLCD /* CyberTAN Technology products */ product CYBERTAN TG54USB 0x1666 TG54USB product CYBERTAN RT2870 0x1828 RT2870 /* Cypress Semiconductor products */ product CYPRESS MOUSE 0x0001 mouse product CYPRESS THERMO 0x0002 thermometer product CYPRESS WISPY1A 0x0bad MetaGeek Wi-Spy product CYPRESS KBDHUB 0x0101 Keyboard/Hub product CYPRESS FMRADIO 0x1002 FM Radio product CYPRESS IKARILASER 0x121f Ikari Laser SteelSeries ApS product CYPRESS USBRS232 0x5500 USB-RS232 Interface product CYPRESS SLIM_HUB 0x6560 Slim Hub product CYPRESS XX6830XX 0x6830 PATA Storage Device product CYPRESS SILVERSHIELD 0xfd13 Gembird Silver Shield PM /* Daisy Technology products */ product DAISY DMC 0x6901 USB MultiMedia Reader /* Dallas Semiconductor products */ product DALLAS J6502 0x4201 J-6502 speakers /* DataApex products */ product DATAAPEX MULTICOM 0xead6 MultiCom /* Dell products */ product DELL PORT 0x0058 Port Replicator product DELL AIO926 0x5115 Photo AIO Printer 926 product DELL BC02 0x8000 BC02 Bluetooth USB Adapter product DELL PRISM_GT_1 0x8102 PrismGT USB 2.0 WLAN product DELL TM350 0x8103 TrueMobile 350 Bluetooth USB Adapter product DELL PRISM_GT_2 0x8104 PrismGT USB 2.0 WLAN product DELL U5700 0x8114 Dell 5700 3G product DELL U5500 0x8115 Dell 5500 3G product DELL U5505 0x8116 Dell 5505 3G product DELL U5700_2 0x8117 Dell 5700 3G product DELL U5510 0x8118 Dell 5510 3G product DELL U5700_3 0x8128 Dell 5700 3G product DELL U5700_4 0x8129 Dell 5700 3G product DELL U5720 0x8133 Dell 5720 3G product DELL U5720_2 0x8134 Dell 5720 3G product DELL U740 0x8135 Dell U740 CDMA product DELL U5520 0x8136 Dell 5520 3G product DELL U5520_2 0x8137 Dell 5520 3G product DELL U5520_3 0x8138 Dell 5520 3G product DELL U5730 0x8180 Dell 5730 3G product DELL U5730_2 0x8181 Dell 5730 3G product DELL U5730_3 0x8182 Dell 5730 3G product DELL DW700 0x9500 Dell DW700 GPS /* DeLorme Publishing products */ product DELORME EARTHMATE 0x0100 Earthmate GPS /* Desknote products */ product DESKNOTE UCR_61S2B 0x0c55 UCR-61S2B /* Diamond products */ product DIAMOND RIO500USB 0x0001 Rio 500 USB /* Dick Smith Electronics (really C-Net) products */ product DICKSMITH RT2573 0x9022 RT2573 product DICKSMITH CWD854F 0x9032 C-Net CWD-854 rev F /* Digi International products */ product DIGI ACCELEPORT2 0x0002 AccelePort USB 2 product DIGI ACCELEPORT4 0x0004 AccelePort USB 4 product DIGI ACCELEPORT8 0x0008 AccelePort USB 8 /* Digianswer A/S products */ product DIGIANSWER ZIGBEE802154 0x000a ZigBee/802.15.4 MAC /* D-Link products */ /*product DLINK DSBS25 0x0100 DSB-S25 serial*/ product DLINK DUBE100 0x1a00 10/100 Ethernet product DLINK DUBE100C1 0x1a02 DUB-E100 rev C1 product DLINK DSB650TX4 0x200c 10/100 Ethernet product DLINK DWL120E 0x3200 DWL-120 rev E product DLINK RTL8192CU_1 0x3307 RTL8192CU product DLINK RTL8188CU 0x3308
RTL8188CU product DLINK RTL8192CU_2 0x3309 RTL8192CU product DLINK RTL8192CU_3 0x330a RTL8192CU product DLINK DWA131B 0x330d DWA-131 rev B product DLINK DWA125D1 0x330f DWA-125 rev D1 product DLINK DWA123D1 0x3310 DWA-123 rev D1 product DLINK DWA171A1 0x3314 DWA-171 rev A1 product DLINK DWA182C1 0x3315 DWA-182 rev C1 product DLINK DWA180A1 0x3316 DWA-180 rev A1 product DLINK DWA172A1 0x3318 DWA-172 rev A1 product DLINK DWA131E1 0x3319 DWA-131 rev E1 product DLINK DWL122 0x3700 DWL-122 product DLINK DWLG120 0x3701 DWL-G120 product DLINK DWL120F 0x3702 DWL-120 rev F product DLINK DWLAG132 0x3a00 DWL-AG132 product DLINK DWLAG132_NF 0x3a01 DWL-AG132 (no firmware) product DLINK DWLG132 0x3a02 DWL-G132 product DLINK DWLG132_NF 0x3a03 DWL-G132 (no firmware) product DLINK DWLAG122 0x3a04 DWL-AG122 product DLINK DWLAG122_NF 0x3a05 DWL-AG122 (no firmware) product DLINK DWLG122 0x3c00 DWL-G122 b1 Wireless Adapter product DLINK DUBE100B1 0x3c05 DUB-E100 rev B1 product DLINK RT2870 0x3c09 RT2870 product DLINK RT3072 0x3c0a RT3072 product DLINK DWA140B3 0x3c15 DWA-140 rev B3 product DLINK DWA160B2 0x3c1a DWA-160 rev B2 product DLINK DWA127 0x3c1b DWA-127 Wireless Adapter product DLINK DWA162 0x3c1f DWA-162 Wireless Adapter product DLINK DWA140D1 0x3c20 DWA-140 rev D1 product DLINK DSB650C 0x4000 10Mbps Ethernet product DLINK DSB650TX1 0x4001 10/100 Ethernet product DLINK DSB650TX 0x4002 10/100 Ethernet product DLINK DSB650TX_PNA 0x4003 1/10/100 Ethernet product DLINK DSB650TX3 0x400b 10/100 Ethernet product DLINK DSB650TX2 0x4102 10/100 Ethernet product DLINK DUB1312 0x4a00 10/100/1000 Ethernet product DLINK DWM157 0x7d02 DWM-157 product DLINK DWR510 0x7e12 DWR-510 product DLINK DWM157_CD 0xa707 DWM-157 CD-ROM Mode product DLINK DWR510_CD 0xa805 DWR-510 CD-ROM Mode product DLINK DSB650 0xabc1 10/100 Ethernet product DLINK DUBH7 0xf103 DUB-H7 USB 2.0 7-Port Hub product DLINK2 RTL8192SU_1 0x3300 RTL8192SU product DLINK2 RTL8192SU_2 0x3302 RTL8192SU product DLINK2 DWA131A1 0x3303 DWA-131 A1 product DLINK2 DWA160A2 0x3a09 DWA-160 A2 product DLINK2 DWA120 0x3a0c DWA-120 product DLINK2 DWA120_NF 0x3a0d DWA-120 (no firmware) product DLINK2 DWA130D1 0x3a0f DWA-130 D1 product DLINK2 DWLG122C1 0x3c03 DWL-G122 c1 product DLINK2 WUA1340 0x3c04 WUA-1340 product DLINK2 DWA111 0x3c06 DWA-111 product DLINK2 DWA110 0x3c07 DWA-110 product DLINK2 RT2870_1 0x3c09 RT2870 product DLINK2 RT3072 0x3c0a RT3072 product DLINK2 RT3072_1 0x3c0b RT3072 product DLINK2 RT3070_1 0x3c0d RT3070 product DLINK2 RT3070_2 0x3c0e RT3070 product DLINK2 RT3070_3 0x3c0f RT3070 product DLINK2 DWA160A1 0x3c10 DWA-160 A1 product DLINK2 RT2870_2 0x3c11 RT2870 product DLINK2 DWA130 0x3c13 DWA-130 product DLINK2 RT3070_4 0x3c15 RT3070 product DLINK2 RT3070_5 0x3c16 RT3070 product DLINK3 DWM652 0x3e04 DWM-652 /* DisplayLink products */ product DISPLAYLINK LCD4300U 0x01ba LCD-4300U product DISPLAYLINK LCD8000U 0x01bb LCD-8000U product DISPLAYLINK LD220 0x0100 Samsung LD220 product DISPLAYLINK GUC2020 0x0059 IOGEAR DVI GUC2020 product DISPLAYLINK VCUD60 0x0136 Rextron DVI product DISPLAYLINK CONV 0x0138 StarTech CONV-USB2DVI product DISPLAYLINK DLDVI 0x0141 DisplayLink DVI product DISPLAYLINK VGA10 0x015a CMP-USBVGA10 product DISPLAYLINK WSDVI 0x0198 WS Tech DVI product DISPLAYLINK EC008 0x019b EasyCAP008 DVI product DISPLAYLINK HPDOCK 0x01d4 HP USB Docking product DISPLAYLINK NL571 0x01d7 HP USB DVI product DISPLAYLINK M01061 0x01e2 Lenovo DVI product DISPLAYLINK SWDVI 0x024c SUNWEIT DVI product DISPLAYLINK NBDOCK 0x0215 VideoHome NBdock1920 product 
DISPLAYLINK LUM70 0x02a9 Lilliput UM-70 product DISPLAYLINK UM7X0 0x401a nanovision MiMo product DISPLAYLINK LT1421 0x03e0 Lenovo ThinkVision LT1421 product DISPLAYLINK POLARIS2 0x0117 Polaris2 USB dock product DISPLAYLINK PLUGABLE 0x0377 Plugable docking station product DISPLAYLINK ITEC 0x02e9 i-tec USB 2.0 Docking Station /* DMI products */ product DMI CFSM_RW 0xa109 CF/SM Reader/Writer product DMI DISK 0x2bcf Generic Disk /* DrayTek products */ product DRAYTEK VIGOR550 0x0550 Vigor550 /* Dream Link products */ product DREAMLINK DL100B 0x0004 USB Webmail Notifier /* dresden elektronik products */ product DRESDENELEKTRONIK SENSORTERMINALBOARD 0x0001 SensorTerminalBoard product DRESDENELEKTRONIK WIRELESSHANDHELDTERMINAL 0x0004 Wireless Handheld Terminal product DRESDENELEKTRONIK DE_RFNODE 0x001c deRFnode product DRESDENELEKTRONIK LEVELSHIFTERSTICKLOWCOST 0x0022 Levelshifter Stick Low Cost /* DYMO */ product DYMO LABELMANAGERPNP 0x1001 DYMO LabelManager PnP /* Dynastream Innovations */ product DYNASTREAM ANTDEVBOARD 0x1003 ANT dev board product DYNASTREAM ANT2USB 0x1004 ANT2USB product DYNASTREAM ANTDEVBOARD2 0x1006 ANT dev board /* Edimax products */ product EDIMAX EW7318USG 0x7318 USB Wireless dongle product EDIMAX RTL8192SU_1 0x7611 RTL8192SU product EDIMAX RTL8192SU_2 0x7612 RTL8192SU product EDIMAX EW7622UMN 0x7622 EW-7622UMn product EDIMAX RT2870_1 0x7711 RT2870 product EDIMAX EW7717 0x7717 EW-7717 product EDIMAX EW7718 0x7718 EW-7718 product EDIMAX EW7733UND 0x7733 EW-7733UnD product EDIMAX EW7811UN 0x7811 EW-7811Un product EDIMAX RTL8192CU 0x7822 RTL8192CU product EDIMAX EW7811UTC_1 0xa811 EW-7811UTC product EDIMAX EW7811UTC_2 0xa812 EW-7811UTC product EDIMAX EW7822UAC 0xa822 EW-7822UAC /* eGalax Products */ product EGALAX TPANEL 0x0001 Touch Panel product EGALAX TPANEL2 0x0002 Touch Panel product EGALAX2 TPANEL 0x0001 Touch Panel /* EGO Products */ product EGO DUMMY 0x0000 Dummy Product product EGO M4U 0x1020 ESI M4U /* Eicon Networks */ product EICON DIVA852 0x4905 Diva 852 ISDN TA /* EIZO products */ product EIZO HUB 0x0000 hub product EIZO MONITOR 0x0001 monitor /* ELCON Systemtechnik products */ product ELCON PLAN 0x0002 Goldpfeil P-LAN /* Elecom products */ product ELECOM MOUSE29UO 0x0002 mouse 29UO product ELECOM LDUSBTX0 0x200c LD-USB/TX product ELECOM LDUSBTX1 0x4002 LD-USB/TX product ELECOM LDUSBLTX 0x4005 LD-USBL/TX product ELECOM WDC150SU2M 0x4008 WDC-150SU2M product ELECOM LDUSBTX2 0x400b LD-USB/TX product ELECOM LDUSB20 0x4010 LD-USB20 product ELECOM UCSGT 0x5003 UC-SGT product ELECOM UCSGT0 0x5004 UC-SGT product ELECOM LDUSBTX3 0xabc1 LD-USB/TX /* Elektor products */ product ELEKTOR FT323R 0x0005 FTDI compatible adapter /* Elsa products */ product ELSA MODEM1 0x2265 ELSA Modem Board product ELSA USB2ETHERNET 0x3000 Microlink USB2Ethernet /* ELV products */ product ELV USBI2C 0xe00f USB-I2C interface /* EMS products */ product EMS DUAL_SHOOTER 0x0003 PSX gun controller converter /* Emtec products */ product EMTEC RUF2PS 0x2240 Flash Drive /* Encore products */ product ENCORE RT3070_1 0x1480 RT3070 product ENCORE RT3070_2 0x14a1 RT3070 product ENCORE RT3070_3 0x14a9 RT3070 /* Entrega products */ product ENTREGA 1S 0x0001 1S serial product ENTREGA 2S 0x0002 2S serial product ENTREGA 1S25 0x0003 1S25 serial product ENTREGA 4S 0x0004 4S serial product ENTREGA E45 0x0005 E45 Ethernet product ENTREGA CENTRONICS 0x0006 Parallel Port product ENTREGA XX1 0x0008 Ethernet product ENTREGA 1S9 0x0093 1S9 serial product ENTREGA EZUSB 0x8000 EZ-USB /*product ENTREGA SERIAL 0x8001 
DB25 Serial*/ product ENTREGA 2U4S 0x8004 2U4S serial/usb hub product ENTREGA XX2 0x8005 Ethernet /*product ENTREGA SERIAL_DB9 0x8093 DB9 Serial*/ /* Epson products */ product EPSON PRINTER1 0x0001 USB Printer product EPSON PRINTER2 0x0002 ISD USB Smart Cable for Mac product EPSON PRINTER3 0x0003 ISD USB Smart Cable product EPSON PRINTER5 0x0005 USB Printer product EPSON 636 0x0101 Perfection 636U / 636Photo scanner product EPSON 610 0x0103 Perfection 610 scanner product EPSON 1200 0x0104 Perfection 1200U / 1200Photo scanner product EPSON 1600 0x0107 Expression 1600 scanner product EPSON 1640 0x010a Perfection 1640SU scanner product EPSON 1240 0x010b Perfection 1240U / 1240Photo scanner product EPSON 640U 0x010c Perfection 640U scanner product EPSON 1250 0x010f Perfection 1250U / 1250Photo scanner product EPSON 1650 0x0110 Perfection 1650 scanner product EPSON GT9700F 0x0112 GT-9700F scanner product EPSON GT9300UF 0x011b GT-9300UF scanner product EPSON 3200 0x011c Perfection 3200 scanner product EPSON 1260 0x011d Perfection 1260 scanner product EPSON 1660 0x011e Perfection 1660 scanner product EPSON 1670 0x011f Perfection 1670 scanner product EPSON 1270 0x0120 Perfection 1270 scanner product EPSON 2480 0x0121 Perfection 2480 scanner product EPSON 3590 0x0122 Perfection 3590 scanner product EPSON 4990 0x012a Perfection 4990 Photo scanner product EPSON CRESSI_EDY 0x0521 Cressi Edy diving computer product EPSON N2ITION3 0x0522 Zeagle N2iTion3 diving computer product EPSON STYLUS_875DC 0x0601 Stylus Photo 875DC Card Reader product EPSON STYLUS_895 0x0602 Stylus Photo 895 Card Reader product EPSON CX5400 0x0808 CX5400 scanner product EPSON 3500 0x080e CX-3500/3600/3650 MFP product EPSON RX425 0x080f Stylus Photo RX425 scanner product EPSON DX3800 0x0818 CX3700/CX3800/DX38x0 MFP scanner product EPSON 4800 0x0819 CX4700/CX4800/DX48x0 MFP scanner product EPSON 4200 0x0820 CX4100/CX4200/DX4200 MFP scanner product EPSON 5000 0x082b CX4900/CX5000/DX50x0 MFP scanner product EPSON 6000 0x082e CX5900/CX6000/DX60x0 MFP scanner product EPSON DX4000 0x082f DX4000 MFP scanner product EPSON DX7400 0x0838 CX7300/CX7400/DX7400 MFP scanner product EPSON DX8400 0x0839 CX8300/CX8400/DX8400 MFP scanner product EPSON SX100 0x0841 SX100/NX100 MFP scanner product EPSON NX300 0x0848 NX300 MFP scanner product EPSON SX200 0x0849 SX200/SX205 MFP scanner product EPSON SX400 0x084a SX400/NX400/TX400 MFP scanner /* e-TEK Labs products */ product ETEK 1COM 0x8007 Serial /* Evolution products */ product EVOLUTION ER1 0x0300 FTDI compatible adapter product EVOLUTION HYBRID 0x0302 FTDI compatible adapter product EVOLUTION RCM4 0x0303 FTDI compatible adapter /* Extended Systems products */ product EXTENDED XTNDACCESS 0x0100 XTNDAccess IrDA /* Falcom products */ product FALCOM TWIST 0x0001 USB GSM/GPRS Modem product FALCOM SAMBA 0x0005 FTDI compatible adapter /* FEIYA products */ product FEIYA DUMMY 0x0000 Dummy product product FEIYA 5IN1 0x1132 5-in-1 Card Reader product FEIYA ELANGO 0x6200 MicroSDHC Card Reader product FEIYA AC110 0x6300 AC-110 Card Reader /* FeiXun Communication products */ product FEIXUN RTL8188CU 0x0090 RTL8188CU product FEIXUN RTL8192CU 0x0091 RTL8192CU /* Festo */ product FESTO CPX_USB 0x0102 CPX-USB product FESTO CMSP 0x0501 CMSP /* Fiberline */ product FIBERLINE WL430U 0x6003 WL-430U /* FIC / OpenMoko */ product FIC NEO1973_DEBUG 0x5118 FTDI compatible adapter /* Fossil, Inc products */ product FOSSIL WRISTPDA 0x0002 Wrist PDA /* Foxconn products */ product FOXCONN TCOM_TC_300 0xe000 T-Com TC 300 product 
FOXCONN PIRELLI_DP_L10 0xe003 Pirelli DP-L10 /* Freecom products */ product FREECOM DVD 0xfc01 DVD drive product FREECOM HDD 0xfc05 Classic SL Hard Drive /* Fujitsu Siemens Computers products */ product FSC E5400 0x1009 PrismGT USB 2.0 WLAN /* Future Technology Devices products */ product FTDI SCX8_USB_PHOENIX 0x5259 SCx8 USB Phoenix interface product FTDI SERIAL_8U100AX 0x8372 8U100AX Serial product FTDI SERIAL_8U232AM 0x6001 8U232AM Serial product FTDI SERIAL_8U232AM4 0x6004 8U232AM Serial product FTDI SERIAL_232RL 0x6006 FT232RL Serial product FTDI SERIAL_2232C 0x6010 FT2232C Dual port Serial product FTDI 232H 0x6014 FTDI compatible adapter product FTDI 232EX 0x6015 FTDI compatible adapter product FTDI SERIAL_2232D 0x9e90 FT2232D Dual port Serial product FTDI SERIAL_4232H 0x6011 FT4232H Quad port Serial product FTDI XDS100V2 0xa6d0 TI XDS100V1/V2 and early Beaglebones product FTDI XDS100V3 0xa6d1 TI XDS100V3 product FTDI KTLINK 0xbbe2 KT-LINK Embedded Hackers Multitool product FTDI TURTELIZER2 0xbdc8 egnite Turtelizer 2 JTAG/RS232 Adapter /* Gude Analog- und Digitalsysteme products also use FTDI's id: */ product FTDI TACTRIX_OPENPORT_13M 0xcc48 OpenPort 1.3 Mitsubishi product FTDI TACTRIX_OPENPORT_13S 0xcc49 OpenPort 1.3 Subaru product FTDI TACTRIX_OPENPORT_13U 0xcc4a OpenPort 1.3 Universal product FTDI GAMMASCOUT 0xd678 Gamma-Scout product FTDI KBS 0xe6c8 Pyramid KBS USB LCD product FTDI EISCOU 0xe888 Expert ISDN Control USB product FTDI UOPTBR 0xe889 USB-RS232 OptoBridge product FTDI EMCU2D 0xe88a Expert mouseCLOCK USB II product FTDI PCMSFU 0xe88b Precision Clock MSF USB product FTDI EMCU2H 0xe88c Expert mouseCLOCK USB II HBG product FTDI MAXSTREAM 0xee18 Maxstream PKG-U product FTDI USB_UIRT 0xf850 USB-UIRT product FTDI USBSERIAL 0xfa00 Matrix Orbital USB Serial product FTDI MX2_3 0xfa01 Matrix Orbital MX2 or MX3 product FTDI MX4_5 0xfa02 Matrix Orbital MX4 or MX5 product FTDI LK202 0xfa03 Matrix Orbital VK/LK202 Family product FTDI LK204 0xfa04 Matrix Orbital VK/LK204 Family product FTDI CFA_632 0xfc08 Crystalfontz CFA-632 USB LCD product FTDI CFA_634 0xfc09 Crystalfontz CFA-634 USB LCD product FTDI CFA_633 0xfc0b Crystalfontz CFA-633 USB LCD product FTDI CFA_631 0xfc0c Crystalfontz CFA-631 USB LCD product FTDI CFA_635 0xfc0d Crystalfontz CFA-635 USB LCD product FTDI SEMC_DSS20 0xfc82 SEMC DSS-20 SyncStation /* Commerzielle und Technische Informationssysteme GmbH products */ product FTDI CTI_USB_NANO_485 0xf60b CTI USB-Nano 485 product FTDI CTI_USB_MINI_485 0xf608 CTI USB-Mini 485 /* Other products */ product FTDI 232RL 0xfbfa FTDI compatible adapter product FTDI 4N_GALAXY_DE_1 0xf3c0 FTDI compatible adapter product FTDI 4N_GALAXY_DE_2 0xf3c1 FTDI compatible adapter product FTDI 4N_GALAXY_DE_3 0xf3c2 FTDI compatible adapter product FTDI 8U232AM_ALT 0x6006 FTDI compatible adapter product FTDI ACCESSO 0xfad0 FTDI compatible adapter product FTDI ACG_HFDUAL 0xdd20 FTDI compatible adapter product FTDI ACTIVE_ROBOTS 0xe548 FTDI compatible adapter product FTDI ACTZWAVE 0xf2d0 FTDI compatible adapter product FTDI AMC232 0xff00 FTDI compatible adapter product FTDI ARTEMIS 0xdf28 FTDI compatible adapter product FTDI ASK_RDR400 0xc991 FTDI compatible adapter product FTDI ATIK_ATK16 0xdf30 FTDI compatible adapter product FTDI ATIK_ATK16C 0xdf32 FTDI compatible adapter product FTDI ATIK_ATK16HR 0xdf31 FTDI compatible adapter product FTDI ATIK_ATK16HRC 0xdf33 FTDI compatible adapter product FTDI ATIK_ATK16IC 0xdf35 FTDI compatible adapter product FTDI BCS_SE923 0xfb99 FTDI compatible adapter
product FTDI CANDAPTER 0x9f80 FTDI compatible adapter product FTDI CANUSB 0xffa8 FTDI compatible adapter product FTDI CCSICDU20_0 0xf9d0 FTDI compatible adapter product FTDI CCSICDU40_1 0xf9d1 FTDI compatible adapter product FTDI CCSICDU64_4 0xf9d4 FTDI compatible adapter product FTDI CCSLOAD_N_GO_3 0xf9d3 FTDI compatible adapter product FTDI CCSMACHX_2 0xf9d2 FTDI compatible adapter product FTDI CCSPRIME8_5 0xf9d5 FTDI compatible adapter product FTDI CHAMSYS_24_MASTER_WING 0xdaf8 FTDI compatible adapter product FTDI CHAMSYS_MAXI_WING 0xdafd FTDI compatible adapter product FTDI CHAMSYS_MEDIA_WING 0xdafe FTDI compatible adapter product FTDI CHAMSYS_MIDI_TIMECODE 0xdafb FTDI compatible adapter product FTDI CHAMSYS_MINI_WING 0xdafc FTDI compatible adapter product FTDI CHAMSYS_PC_WING 0xdaf9 FTDI compatible adapter product FTDI CHAMSYS_USB_DMX 0xdafa FTDI compatible adapter product FTDI CHAMSYS_WING 0xdaff FTDI compatible adapter product FTDI COM4SM 0xd578 FTDI compatible adapter product FTDI CONVERTER_0 0xd388 FTDI compatible adapter product FTDI CONVERTER_1 0xd389 FTDI compatible adapter product FTDI CONVERTER_2 0xd38a FTDI compatible adapter product FTDI CONVERTER_3 0xd38b FTDI compatible adapter product FTDI CONVERTER_4 0xd38c FTDI compatible adapter product FTDI CONVERTER_5 0xd38d FTDI compatible adapter product FTDI CONVERTER_6 0xd38e FTDI compatible adapter product FTDI CONVERTER_7 0xd38f FTDI compatible adapter product FTDI DMX4ALL 0xc850 FTDI compatible adapter product FTDI DOMINTELL_DGQG 0xef50 FTDI compatible adapter product FTDI DOMINTELL_DUSB 0xef51 FTDI compatible adapter product FTDI DOTEC 0x9868 FTDI compatible adapter product FTDI ECLO_COM_1WIRE 0xea90 FTDI compatible adapter product FTDI ECO_PRO_CDS 0xe520 FTDI compatible adapter product FTDI ELSTER_UNICOM 0xe700 FTDI compatible adapter product FTDI ELV_ALC8500 0xf06e FTDI compatible adapter product FTDI ELV_CLI7000 0xfb59 FTDI compatible adapter product FTDI ELV_CSI8 0xe0f0 FTDI compatible adapter product FTDI ELV_EC3000 0xe006 FTDI compatible adapter product FTDI ELV_EM1000DL 0xe0f1 FTDI compatible adapter product FTDI ELV_EM1010PC 0xe0ef FTDI compatible adapter product FTDI ELV_FEM 0xe00a FTDI compatible adapter product FTDI ELV_FHZ1000PC 0xf06f FTDI compatible adapter product FTDI ELV_FHZ1300PC 0xe0e8 FTDI compatible adapter product FTDI ELV_FM3RX 0xe0ed FTDI compatible adapter product FTDI ELV_FS20SIG 0xe0f4 FTDI compatible adapter product FTDI ELV_HS485 0xe0ea FTDI compatible adapter product FTDI ELV_KL100 0xe002 FTDI compatible adapter product FTDI ELV_MSM1 0xe001 FTDI compatible adapter product FTDI ELV_PCD200 0xf06c FTDI compatible adapter product FTDI ELV_PCK100 0xe0f2 FTDI compatible adapter product FTDI ELV_PPS7330 0xfb5c FTDI compatible adapter product FTDI ELV_RFP500 0xe0f3 FTDI compatible adapter product FTDI ELV_T1100 0xf06b FTDI compatible adapter product FTDI ELV_TFD128 0xe0ec FTDI compatible adapter product FTDI ELV_TFM100 0xfb5d FTDI compatible adapter product FTDI ELV_TWS550 0xe009 FTDI compatible adapter product FTDI ELV_UAD8 0xf068 FTDI compatible adapter product FTDI ELV_UDA7 0xf069 FTDI compatible adapter product FTDI ELV_UDF77 0xfb5e FTDI compatible adapter product FTDI ELV_UIO88 0xfb5f FTDI compatible adapter product FTDI ELV_ULA200 0xf06d FTDI compatible adapter product FTDI ELV_UM100 0xfb5a FTDI compatible adapter product FTDI ELV_UMS100 0xe0eb FTDI compatible adapter product FTDI ELV_UO100 0xfb5b FTDI compatible adapter product FTDI ELV_UR100 0xfb58 FTDI compatible adapter product FTDI ELV_USI2 
0xf06a FTDI compatible adapter product FTDI ELV_USR 0xe000 FTDI compatible adapter product FTDI ELV_UTP8 0xe0f5 FTDI compatible adapter product FTDI ELV_WS300PC 0xe0f6 FTDI compatible adapter product FTDI ELV_WS444PC 0xe0f7 FTDI compatible adapter product FTDI ELV_WS500 0xe0e9 FTDI compatible adapter product FTDI ELV_WS550 0xe004 FTDI compatible adapter product FTDI ELV_WS777 0xe0ee FTDI compatible adapter product FTDI ELV_WS888 0xe008 FTDI compatible adapter product FTDI FUTURE_0 0xf44a FTDI compatible adapter product FTDI FUTURE_1 0xf44b FTDI compatible adapter product FTDI FUTURE_2 0xf44c FTDI compatible adapter product FTDI GENERIC 0x9378 FTDI compatible adapter product FTDI GUDEADS_E808 0xe808 FTDI compatible adapter product FTDI GUDEADS_E809 0xe809 FTDI compatible adapter product FTDI GUDEADS_E80A 0xe80a FTDI compatible adapter product FTDI GUDEADS_E80B 0xe80b FTDI compatible adapter product FTDI GUDEADS_E80C 0xe80c FTDI compatible adapter product FTDI GUDEADS_E80D 0xe80d FTDI compatible adapter product FTDI GUDEADS_E80E 0xe80e FTDI compatible adapter product FTDI GUDEADS_E80F 0xe80f FTDI compatible adapter product FTDI GUDEADS_E88D 0xe88d FTDI compatible adapter product FTDI GUDEADS_E88E 0xe88e FTDI compatible adapter product FTDI GUDEADS_E88F 0xe88f FTDI compatible adapter product FTDI HD_RADIO 0x937c FTDI compatible adapter product FTDI HO720 0xed72 FTDI compatible adapter product FTDI HO730 0xed73 FTDI compatible adapter product FTDI HO820 0xed74 FTDI compatible adapter product FTDI HO870 0xed71 FTDI compatible adapter product FTDI IBS_APP70 0xff3d FTDI compatible adapter product FTDI IBS_PCMCIA 0xff3a FTDI compatible adapter product FTDI IBS_PEDO 0xff3e FTDI compatible adapter product FTDI IBS_PICPRO 0xff39 FTDI compatible adapter product FTDI IBS_PK1 0xff3b FTDI compatible adapter product FTDI IBS_PROD 0xff3f FTDI compatible adapter product FTDI IBS_RS232MON 0xff3c FTDI compatible adapter product FTDI IBS_US485 0xff38 FTDI compatible adapter product FTDI IPLUS 0xd070 FTDI compatible adapter product FTDI IPLUS2 0xd071 FTDI compatible adapter product FTDI IRTRANS 0xfc60 FTDI compatible adapter product FTDI LENZ_LIUSB 0xd780 FTDI compatible adapter product FTDI LM3S_DEVEL_BOARD 0xbcd8 FTDI compatible adapter product FTDI LM3S_EVAL_BOARD 0xbcd9 FTDI compatible adapter product FTDI LM3S_ICDI_B_BOARD 0xbcda FTDI compatible adapter product FTDI MASTERDEVEL2 0xf449 FTDI compatible adapter product FTDI MHAM_DB9 0xeeed FTDI compatible adapter product FTDI MHAM_IC 0xeeec FTDI compatible adapter product FTDI MHAM_KW 0xeee8 FTDI compatible adapter product FTDI MHAM_RS232 0xeeee FTDI compatible adapter product FTDI MHAM_Y6 0xeeea FTDI compatible adapter product FTDI MHAM_Y8 0xeeeb FTDI compatible adapter product FTDI MHAM_Y9 0xeeef FTDI compatible adapter product FTDI MHAM_YS 0xeee9 FTDI compatible adapter product FTDI MICRO_CHAMELEON 0xcaa0 FTDI compatible adapter product FTDI MTXORB_5 0xfa05 FTDI compatible adapter product FTDI MTXORB_6 0xfa06 FTDI compatible adapter product FTDI NXTCAM 0xabb8 FTDI compatible adapter product FTDI OCEANIC 0xf460 FTDI compatible adapter product FTDI OOCDLINK 0xbaf8 FTDI compatible adapter product FTDI OPENDCC 0xbfd8 FTDI compatible adapter product FTDI OPENDCC_GATEWAY 0xbfdb FTDI compatible adapter product FTDI OPENDCC_GBM 0xbfdc FTDI compatible adapter product FTDI OPENDCC_SNIFFER 0xbfd9 FTDI compatible adapter product FTDI OPENDCC_THROTTLE 0xbfda FTDI compatible adapter product FTDI PCDJ_DAC2 0xfa88 FTDI compatible adapter product FTDI PERLE_ULTRAPORT 
0xf0c0 FTDI compatible adapter product FTDI PHI_FISCO 0xe40b FTDI compatible adapter product FTDI PIEGROUP 0xf208 FTDI compatible adapter product FTDI PROPOX_JTAGCABLEII 0xd738 FTDI compatible adapter product FTDI R2000KU_TRUE_RNG 0xfb80 FTDI compatible adapter product FTDI R2X0 0xfc71 FTDI compatible adapter product FTDI RELAIS 0xfa10 FTDI compatible adapter product FTDI REU_TINY 0xed22 FTDI compatible adapter product FTDI RMP200 0xe729 FTDI compatible adapter product FTDI RM_CANVIEW 0xfd60 FTDI compatible adapter product FTDI RRCIRKITS_LOCOBUFFER 0xc7d0 FTDI compatible adapter product FTDI SCIENCESCOPE_HS_LOGBOOK 0xff1d FTDI compatible adapter product FTDI SCIENCESCOPE_LOGBOOKML 0xff18 FTDI compatible adapter product FTDI SCIENCESCOPE_LS_LOGBOOK 0xff1c FTDI compatible adapter product FTDI SCS_DEVICE_0 0xd010 FTDI compatible adapter product FTDI SCS_DEVICE_1 0xd011 FTDI compatible adapter product FTDI SCS_DEVICE_2 0xd012 FTDI compatible adapter product FTDI SCS_DEVICE_3 0xd013 FTDI compatible adapter product FTDI SCS_DEVICE_4 0xd014 FTDI compatible adapter product FTDI SCS_DEVICE_5 0xd015 FTDI compatible adapter product FTDI SCS_DEVICE_6 0xd016 FTDI compatible adapter product FTDI SCS_DEVICE_7 0xd017 FTDI compatible adapter product FTDI SDMUSBQSS 0xf448 FTDI compatible adapter product FTDI SIGNALYZER_SH2 0xbca2 FTDI compatible adapter product FTDI SIGNALYZER_SH4 0xbca4 FTDI compatible adapter product FTDI SIGNALYZER_SLITE 0xbca1 FTDI compatible adapter product FTDI SIGNALYZER_ST 0xbca0 FTDI compatible adapter product FTDI SPECIAL_1 0xfc70 FTDI compatible adapter product FTDI SPECIAL_3 0xfc72 FTDI compatible adapter product FTDI SPECIAL_4 0xfc73 FTDI compatible adapter product FTDI SPROG_II 0xf0c8 FTDI compatible adapter product FTDI SR_RADIO 0x9379 FTDI compatible adapter product FTDI SUUNTO_SPORTS 0xf680 FTDI compatible adapter product FTDI TAVIR_STK500 0xfa33 FTDI compatible adapter product FTDI TERATRONIK_D2XX 0xec89 FTDI compatible adapter product FTDI TERATRONIK_VCP 0xec88 FTDI compatible adapter product FTDI THORLABS 0xfaf0 FTDI compatible adapter product FTDI TNC_X 0xebe0 FTDI compatible adapter product FTDI TTUSB 0xff20 FTDI compatible adapter product FTDI USBX_707 0xf857 FTDI compatible adapter product FTDI USINT_CAT 0xb810 FTDI compatible adapter product FTDI USINT_RS232 0xb812 FTDI compatible adapter product FTDI USINT_WKEY 0xb811 FTDI compatible adapter product FTDI VARDAAN 0xf070 FTDI compatible adapter product FTDI VNHCPCUSB_D 0xfe38 FTDI compatible adapter product FTDI WESTREX_MODEL_777 0xdc00 FTDI compatible adapter product FTDI WESTREX_MODEL_8900F 0xdc01 FTDI compatible adapter product FTDI XF_547 0xfc0a FTDI compatible adapter product FTDI XF_640 0xfc0e FTDI compatible adapter product FTDI XF_642 0xfc0f FTDI compatible adapter product FTDI XM_RADIO 0x937a FTDI compatible adapter product FTDI YEI_SERVOCENTER31 0xe050 FTDI compatible adapter /* Fuji photo products */ product FUJIPHOTO MASS0100 0x0100 Mass Storage /* Fujitsu protducts */ product FUJITSU AH_F401U 0x105b AH-F401U Air H device /* Fujitsu-Siemens protducts */ product FUJITSUSIEMENS SCR 0x0009 Fujitsu-Siemens SCR USB Reader /* Garmin products */ product GARMIN FORERUNNER230 0x086d ForeRunner 230 product GARMIN IQUE_3600 0x0004 iQue 3600 /* Gemalto products */ product GEMALTO PROXPU 0x5501 Prox-PU/CU RFID Card Reader /* General Instruments (Motorola) products */ product GENERALINSTMNTS SB5100 0x5100 SURFboard SB5100 Cable modem /* Genesys Logic products */ product GENESYS GL620USB 0x0501 GL620USB Host-Host 
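The FTDI block above is consumed as a flat (vendor, product) -> description table. A minimal sketch of that kind of lookup is shown below, assuming only what the table itself states; the struct usb_id and usb_id_lookup names are invented for the example and are not FreeBSD code (FTDI's well-known vendor ID 0x0403 is used for illustration).

/*
 * Illustrative sketch only -- not code from the FreeBSD tree.
 * Shows the kind of vendor/product lookup a consumer of this
 * database performs; names here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct usb_id {
	uint16_t	vendor;		/* e.g. 0x0403 (FTDI) */
	uint16_t	product;	/* e.g. 0x6001 (SERIAL_8U232AM) */
	const char	*desc;
};

static const struct usb_id ids[] = {
	{ 0x0403, 0x6001, "8U232AM Serial" },
	{ 0x0403, 0x6010, "FT2232C Dual port Serial" },
	{ 0x0403, 0xf850, "USB-UIRT" },
};

/* Linear scan; returns NULL when the pair is not in the table. */
static const char *
usb_id_lookup(uint16_t vendor, uint16_t product)
{
	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		if (ids[i].vendor == vendor && ids[i].product == product)
			return (ids[i].desc);
	return (NULL);
}

int
main(void)
{
	const char *d = usb_id_lookup(0x0403, 0x6001);

	printf("%s\n", d != NULL ? d : "unknown device");
	return (0);
}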
/* Fuji photo products */
product FUJIPHOTO MASS0100 0x0100 Mass Storage

/* Fujitsu products */
product FUJITSU AH_F401U 0x105b AH-F401U Air H device

/* Fujitsu-Siemens products */
product FUJITSUSIEMENS SCR 0x0009 Fujitsu-Siemens SCR USB Reader

/* Garmin products */
product GARMIN FORERUNNER230 0x086d ForeRunner 230
product GARMIN IQUE_3600 0x0004 iQue 3600

/* Gemalto products */
product GEMALTO PROXPU 0x5501 Prox-PU/CU RFID Card Reader

/* General Instruments (Motorola) products */
product GENERALINSTMNTS SB5100 0x5100 SURFboard SB5100 Cable modem

/* Genesys Logic products */
product GENESYS GL620USB 0x0501 GL620USB Host-Host interface
product GENESYS GL650 0x0604 GL650 HUB
product GENESYS GL606 0x0606 GL606 USB 2.0 HUB
product GENESYS GL850G 0x0608 GL850G USB 2.0 HUB
product GENESYS GL641USB 0x0700 GL641USB CompactFlash Card Reader
product GENESYS GL641USB2IDE_2 0x0701 GL641USB USB-IDE Bridge No 2
product GENESYS GL641USB2IDE 0x0702 GL641USB USB-IDE Bridge
product GENESYS GL641USB_2 0x0760 GL641USB 6-in-1 Card Reader

/* GIGABYTE products */
product GIGABYTE GN54G 0x8001 GN-54G
product GIGABYTE GNBR402W 0x8002 GN-BR402W
product GIGABYTE GNWLBM101 0x8003 GN-WLBM101
product GIGABYTE GNWBKG 0x8007 GN-WBKG
product GIGABYTE GNWB01GS 0x8008 GN-WB01GS
product GIGABYTE GNWI05GS 0x800a GN-WI05GS
product GIGABYTE RT2870_1 0x800b RT2870
product GIGABYTE GNWB31N 0x800c GN-WB31N
product GIGABYTE GNWB32L 0x800d GN-WB32L

/* Gigaset products */
product GIGASET WLAN 0x0701 WLAN
product GIGASET SMCWUSBTG 0x0710 SMCWUSBT-G
product GIGASET SMCWUSBTG_NF 0x0711 SMCWUSBT-G (no firmware)
product GIGASET AR5523 0x0712 AR5523
product GIGASET AR5523_NF 0x0713 AR5523 (no firmware)
product GIGASET RT2573 0x0722 RT2573
product GIGASET RT3070_1 0x0740 RT3070
product GIGASET RT3070_2 0x0744 RT3070

/* Global Sun Technology products */
product GLOBALSUN AR5523_1 0x7801 AR5523
product GLOBALSUN AR5523_1_NF 0x7802 AR5523 (no firmware)
product GLOBALSUN AR5523_2 0x7811 AR5523
product GLOBALSUN AR5523_2_NF 0x7812 AR5523 (no firmware)

/* Globespan products */
product GLOBESPAN PRISM_GT_1 0x2000 PrismGT USB 2.0 WLAN
product GLOBESPAN PRISM_GT_2 0x2002 PrismGT USB 2.0 WLAN

/* G.Mate, Inc products */
product GMATE YP3X00 0x1001 YP3X00 PDA

/* GN Otometrics */
product GNOTOMETRICS USB 0x0010 FTDI compatible adapter

/* GoHubs products */
product GOHUBS GOCOM232 0x1001 GoCOM232 Serial

/* Good Way Technology products */
product GOODWAY GWUSB2E 0x6200 GWUSB2E
product GOODWAY RT2573 0xc019 RT2573

/* Google products */
product GOOGLE NEXUSONE 0x4e11 Nexus One

/* Gravis products */
product GRAVIS GAMEPADPRO 0x4001 GamePad Pro

/* GREENHOUSE products */
product GREENHOUSE KANA21 0x0001 CF-writer with MP3

/* Griffin Technology */
product GRIFFIN IMATE 0x0405 iMate, ADB Adapter

/* Guillemot Corporation */
product GUILLEMOT DALEADER 0xa300 DA Leader
product GUILLEMOT HWGUSB254 0xe000 HWGUSB2-54 WLAN
product GUILLEMOT HWGUSB254LB 0xe010 HWGUSB2-54-LB
product GUILLEMOT HWGUSB254V2AP 0xe020 HWGUSB2-54V2-AP
product GUILLEMOT HWNU300 0xe030 HWNU-300
product GUILLEMOT HWNUM300 0xe031 HWNUm-300
product GUILLEMOT HWGUN54 0xe032 HWGUn-54
product GUILLEMOT HWNUP150 0xe033 HWNUP-150

/* Hagiwara products */
product HAGIWARA FGSM 0x0002 FlashGate SmartMedia Card Reader
product HAGIWARA FGCF 0x0003 FlashGate CompactFlash Card Reader
product HAGIWARA FG 0x0005 FlashGate

/* HAL Corporation products */
product HAL IMR001 0x0011 Crossam2+USB IR commander

/* Handspring, Inc. */
product HANDSPRING VISOR 0x0100 Handspring Visor
product HANDSPRING TREO 0x0200 Handspring Treo
product HANDSPRING TREO600 0x0300 Handspring Treo 600

/* Hauppauge Computer Works */
product HAUPPAUGE WINTV_USB_FM 0x4d12 WinTV USB FM
product HAUPPAUGE2 NOVAT500 0x9580 NovaT 500Stick

/* Hawking Technologies products */
product HAWKING RT2870_1 0x0001 RT2870
product HAWKING RT2870_2 0x0003 RT2870
product HAWKING HWUN2 0x0009 HWUN2
product HAWKING RT3070 0x000b RT3070
product HAWKING RTL8192SU_1 0x0015 RTL8192SU
product HAWKING RTL8192SU_2 0x0016 RTL8192SU
product HAWKING RTL8192CU 0x0019 RTL8192CU
product HAWKING HD65U 0x0023 HD65U
product HAWKING UF100 0x400c 10/100 USB Ethernet

/* HID Global GmbH products */
product HIDGLOBAL CM2020 0x0596 Omnikey Cardman 2020
product HIDGLOBAL CM6020 0x1784 Omnikey Cardman 6020

/* Hitachi, Ltd. products */
product HITACHI DVDCAM_DZ_MV100A 0x0004 DVD-CAM DZ-MV100A Camcorder
product HITACHI DVDCAM_USB 0x001e DVDCAM USB HS Interface

/* Holtek products */
product HOLTEK F85 0xa030 Holtek USB gaming keyboard

/* HP products */
product HP 895C 0x0004 DeskJet 895C
product HP 4100C 0x0101 Scanjet 4100C
product HP S20 0x0102 Photosmart S20
product HP 880C 0x0104 DeskJet 880C
product HP 4200C 0x0105 ScanJet 4200C
product HP CDWRITERPLUS 0x0107 CD-Writer Plus
product HP KBDHUB 0x010c Multimedia Keyboard Hub
product HP G55XI 0x0111 OfficeJet G55xi
product HP HN210W 0x011c HN210W 802.11b WLAN
product HP 49GPLUS 0x0121 49g+ graphing calculator
product HP 6200C 0x0201 ScanJet 6200C
product HP S20b 0x0202 PhotoSmart S20
product HP 815C 0x0204 DeskJet 815C
product HP 3300C 0x0205 ScanJet 3300C
product HP CDW8200 0x0207 CD-Writer Plus 8200e
product HP MMKEYB 0x020c Multimedia keyboard
product HP 1220C 0x0212 DeskJet 1220C
product HP UN2420_QDL 0x241d UN2420 QDL Firmware Loader
product HP UN2420 0x251d UN2420 WWAN/GPS Module
product HP 810C 0x0304 DeskJet 810C/812C
product HP 4300C 0x0305 Scanjet 4300C
product HP CDW4E 0x0307 CD-Writer+ CD-4e
product HP G85XI 0x0311 OfficeJet G85xi
product HP 1200 0x0317 LaserJet 1200
product HP 5200C 0x0401 Scanjet 5200C
product HP 830C 0x0404 DeskJet 830C
product HP 3400CSE 0x0405 ScanJet 3400cse
product HP 6300C 0x0601 Scanjet 6300C
product HP 840C 0x0604 DeskJet 840c
product HP 2200C 0x0605 ScanJet 2200C
product HP 5300C 0x0701 Scanjet 5300C
product HP 4400C 0x0705 Scanjet 4400C
product HP 4470C 0x0805 Scanjet 4470C
product HP 82x0C 0x0b01 Scanjet 82x0C
product HP 2300D 0x0b17 Laserjet 2300d
product HP 970CSE 0x1004 Deskjet 970Cse
product HP 5400C 0x1005 Scanjet 5400C
product HP 2215 0x1016 iPAQ 22xx/Jornada 548
product HP 568J 0x1116 Jornada 568
product HP 930C 0x1204 DeskJet 930c
product HP3 RTL8188CU 0x1629 RTL8188CU
product HP P2000U 0x1801 Inkjet P-2000U
product HP EV2200 0x1b1d ev2200 HSDPA (aka MC5720)
product HP HS2300 0x1e1d HS2300 HSDPA (aka MC8775)
product HP 640C 0x2004 DeskJet 640c
product HP 4670V 0x3005 ScanJet 4670v
product HP P1100 0x3102 Photosmart P1100
product HP LD220 0x3524 LD220 POS Display
product HP OJ4215 0x3d11 OfficeJet 4215
product HP HN210E 0x811c Ethernet HN210E
product HP2 C500 0x6002 PhotoSmart C500

/* HTC products */
product HTC WINMOBILE 0x00ce HTC USB Sync
product HTC PPC6700MODEM 0x00cf PPC6700 Modem
product HTC SMARTPHONE 0x0a51 SmartPhone USB Sync
product HTC WIZARD 0x0bce HTC Wizard USB Sync
product HTC LEGENDSYNC 0x0c97 HTC Legend USB Sync
product HTC LEGEND 0x0ff9 HTC Legend
product HTC LEGENDINTERNET 0x0ffe HTC Legend Internet Sharing

/* HUAWEI products */
product HUAWEI MOBILE 0x1001 Huawei Mobile
product HUAWEI E220 0x1003 HSDPA modem
product HUAWEI E220BIS 0x1004 HSDPA modem
product HUAWEI E1401 0x1401 3G modem
product HUAWEI E1402 0x1402 3G modem
product HUAWEI E1403 0x1403 3G modem
product HUAWEI E1404 0x1404 3G modem
product HUAWEI E1405 0x1405 3G modem
product HUAWEI E1406 0x1406 3G modem
product HUAWEI E1407 0x1407 3G modem
product HUAWEI E1408 0x1408 3G modem
product HUAWEI E1409 0x1409 3G modem
product HUAWEI E140A 0x140a 3G modem
product HUAWEI E140B 0x140b 3G modem
product HUAWEI E180V 0x140c E180V
product HUAWEI E140D 0x140d 3G modem
product HUAWEI E140E 0x140e 3G modem
product HUAWEI E140F 0x140f 3G modem
product HUAWEI E1410 0x1410 3G modem
product HUAWEI E1411 0x1411 3G modem
product HUAWEI E1412 0x1412 3G modem
product HUAWEI E1413 0x1413 3G modem
product HUAWEI E1414 0x1414 3G modem
product HUAWEI E1415 0x1415 3G modem
product HUAWEI E1416 0x1416 3G modem
product HUAWEI E1417 0x1417 3G modem
product HUAWEI E1418 0x1418 3G modem
product HUAWEI E1419 0x1419 3G modem
product HUAWEI E141A 0x141a 3G modem
product HUAWEI E141B 0x141b 3G modem
product HUAWEI E141C 0x141c 3G modem
product HUAWEI E141D 0x141d 3G modem
product HUAWEI E141E 0x141e 3G modem
product HUAWEI E141F 0x141f 3G modem
product HUAWEI E1420 0x1420 3G modem
product HUAWEI E1421 0x1421 3G modem
product HUAWEI E1422 0x1422 3G modem
product HUAWEI E1423 0x1423 3G modem
product HUAWEI E1424 0x1424 3G modem
product HUAWEI E1425 0x1425 3G modem
product HUAWEI E1426 0x1426 3G modem
product HUAWEI E1427 0x1427 3G modem
product HUAWEI E1428 0x1428 3G modem
product HUAWEI E1429 0x1429 3G modem
product HUAWEI E142A 0x142a 3G modem
product HUAWEI E142B 0x142b 3G modem
product HUAWEI E142C 0x142c 3G modem
product HUAWEI E142D 0x142d 3G modem
product HUAWEI E142E 0x142e 3G modem
product HUAWEI E142F 0x142f 3G modem
product HUAWEI E1430 0x1430 3G modem
product HUAWEI E1431 0x1431 3G modem
product HUAWEI E1432 0x1432 3G modem
product HUAWEI E1433 0x1433 3G modem
product HUAWEI E1434 0x1434 3G modem
product HUAWEI E1435 0x1435 3G modem
product HUAWEI E1436 0x1436 3G modem
product HUAWEI E1437 0x1437 3G modem
product HUAWEI E1438 0x1438 3G modem
product HUAWEI E1439 0x1439 3G modem
product HUAWEI E143A 0x143a 3G modem
product HUAWEI E143B 0x143b 3G modem
product HUAWEI E143C 0x143c 3G modem
product HUAWEI E143D 0x143d 3G modem
product HUAWEI E143E 0x143e 3G modem
product HUAWEI E143F 0x143f 3G modem
product HUAWEI E1752 0x1446 3G modem
product HUAWEI K4505 0x1464 3G modem
product HUAWEI K3765 0x1465 3G modem
product HUAWEI E1820 0x14ac E1820 HSPA+ USB Slider
product HUAWEI K3770 0x14c9 3G modem
product HUAWEI K3772 0x14cf K3772
product HUAWEI K3770_INIT 0x14d1 K3770 Initial
product HUAWEI E3131_INIT 0x14fe 3G modem initial
product HUAWEI E392 0x1505 LTE modem
product HUAWEI E3131 0x1506 3G modem
product HUAWEI K3765_INIT 0x1520 K3765 Initial
product HUAWEI K4505_INIT 0x1521 K4505 Initial
product HUAWEI K3772_INIT 0x1526 K3772 Initial
product HUAWEI E3272_INIT 0x155b LTE modem initial
product HUAWEI ME909U 0x1573 LTE modem
product HUAWEI R215_INIT 0x1582 LTE modem initial
product HUAWEI R215 0x1588 LTE modem
product HUAWEI ME909S 0x15c1 LTE modem
product HUAWEI ETS2055 0x1803 CDMA modem
product HUAWEI E173 0x1c05 3G modem
product HUAWEI E173_INIT 0x1c0b 3G modem initial
product HUAWEI E3272 0x1c1e LTE modem

/* HUAWEI 3com products */
product HUAWEI3COM WUB320G 0x0009 Aolynk WUB320g

/* IBM Corporation */
product IBM USBCDROMDRIVE 0x4427 USB CD-ROM Drive
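Several Huawei entries above come in pairs such as K3765_INIT (0x1520) and K3765 (0x1465): the *_INIT product ID is the virtual installer CD-ROM identity the stick presents when first plugged in, before it is switched into modem mode. A minimal sketch of classifying those IDs is shown below, using only product IDs taken from the table; the helper name is invented for the example and this is not the actual driver logic.

/*
 * Illustrative sketch only.  The ids below are the Huawei "initial"
 * (installer) product ids listed above; a device reporting one of
 * them still needs a mode switch before it behaves as a modem.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint16_t huawei_init_ids[] = {
	0x14d1,	/* K3770 Initial */
	0x14fe,	/* E3131 initial */
	0x1520,	/* K3765 Initial */
	0x1521,	/* K4505 Initial */
	0x1526,	/* K3772 Initial */
	0x155b,	/* E3272 initial */
	0x1582,	/* R215 initial */
	0x1c0b,	/* E173 initial */
};

static bool
huawei_needs_modeswitch(uint16_t product)
{
	for (size_t i = 0;
	    i < sizeof(huawei_init_ids) / sizeof(huawei_init_ids[0]); i++)
		if (huawei_init_ids[i] == product)
			return (true);
	return (false);
}

int
main(void)
{
	printf("0x1520 needs switch: %d\n", huawei_needs_modeswitch(0x1520));
	printf("0x1465 needs switch: %d\n", huawei_needs_modeswitch(0x1465));
	return (0);
}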
/* Icom products */
product ICOM SP1 0x0004 FTDI compatible adapter
product ICOM OPC_U_UC 0x0018 FTDI compatible adapter
product ICOM RP2C1 0x0009 FTDI compatible adapter
product ICOM RP2C2 0x000a FTDI compatible adapter
product ICOM RP2D 0x000b FTDI compatible adapter
product ICOM RP2KVR 0x0013 FTDI compatible adapter
product ICOM RP2KVT 0x0012 FTDI compatible adapter
product ICOM RP2VR 0x000d FTDI compatible adapter
product ICOM RP2VT 0x000c FTDI compatible adapter
product ICOM RP4KVR 0x0011 FTDI compatible adapter
product ICOM RP4KVT 0x0010 FTDI compatible adapter

/* ID-tech products */
product IDTECH IDT1221U 0x0300 FTDI compatible adapter

/* Imagination Technologies products */
product IMAGINATION DBX1 0x2107 DBX1 DSP core

/* Initio Corporation products */
product INITIO DUMMY 0x0000 Dummy product
product INITIO INIC_1610P 0x1e40 USB to SATA Bridge

/* Inside Out Networks products */
product INSIDEOUT EDGEPORT4 0x0001 EdgePort/4 serial ports

/* In-System products */
product INSYSTEM F5U002 0x0002 Parallel printer
product INSYSTEM ATAPI 0x0031 ATAPI Adapter
product INSYSTEM ISD110 0x0200 IDE Adapter ISD110
product INSYSTEM ISD105 0x0202 IDE Adapter ISD105
product INSYSTEM USBCABLE 0x081a USB cable
product INSYSTEM STORAGE_V2 0x5701 USB Storage Adapter V2

/* Intel products */
product INTEL EASYPC_CAMERA 0x0110 Easy PC Camera
product INTEL TESTBOARD 0x9890 82930 test board
product INTEL2 IRMH 0x0020 Integrated Rate Matching Hub
product INTEL2 IRMH2 0x0024 Integrated Rate Matching Hub
product INTEL2 IRMH3 0x8000 Integrated Rate Matching Hub
product INTEL2 IRMH4 0x8008 Integrated Rate Matching Hub

/* Interbiometrics products */
product INTERBIOMETRICS IOBOARD 0x1002 FTDI compatible adapter
product INTERBIOMETRICS MINI_IOBOARD 0x1006 FTDI compatible adapter

/* Intersil products */
product INTERSIL PRISM_GT 0x1000 PrismGT USB 2.0 WLAN
product INTERSIL PRISM_2X 0x3642 Prism2.x or Atmel WLAN

/* Intrepid Control Systems products */
product INTREPIDCS VALUECAN 0x0601 ValueCAN CAN bus interface
product INTREPIDCS NEOVI 0x0701 NeoVI Blue vehicle bus interface

/* I/O DATA products */
product IODATA IU_CD2 0x0204 DVD Multi-plus unit iU-CD2
product IODATA DVR_UEH8 0x0206 DVD Multi-plus unit DVR-UEH8
product IODATA USBSSMRW 0x0314 USB-SSMRW SD-card
product IODATA USBSDRW 0x031e USB-SDRW SD-card
product IODATA USBETT 0x0901 USB ETT
product IODATA USBETTX 0x0904 USB ETTX
product IODATA USBETTXS 0x0913 USB ETTX
product IODATA USBWNB11A 0x0919 USB WN-B11
product IODATA USBWNB11 0x0922 USB Airport WN-B11
product IODATA ETGUS2 0x0930 ETG-US2
product IODATA WNGDNUS2 0x093f WN-GDN/US2
product IODATA RT3072_1 0x0944 RT3072
product IODATA RT3072_2 0x0945 RT3072
product IODATA RT3072_3 0x0947 RT3072
product IODATA RT3072_4 0x0948 RT3072
product IODATA WNAC867U 0x0952 WN-AC867U
product IODATA USBRSAQ 0x0a03 Serial USB-RSAQ1
product IODATA USBRSAQ5 0x0a0e Serial USB-RSAQ5
product IODATA2 USB2SC 0x0a09 USB2.0-SCSI Bridge USB2-SC

/* Iomega products */
product IOMEGA ZIP100 0x0001 Zip 100
product IOMEGA ZIP250 0x0030 Zip 250

/* Ionic products */
product IONICS PLUGCOMPUTER 0x0102 FTDI compatible adapter

/* Integrated System Solution Corp. products */
product ISSC ISSCBTA 0x1001 Bluetooth USB Adapter

/* iTegno products */
product ITEGNO WM1080A 0x1080 WM1080A GSM/GPRS modem
product ITEGNO WM2080A 0x2080 WM2080A CDMA modem

/* Ituner networks products */
product ITUNERNET USBLCD2X20 0x0002 USB-LCD 2x20
product ITUNERNET USBLCD4X20 0xc001 USB-LCD 4x20

/* Jablotron products */
product JABLOTRON PC60B 0x0001 PC-60B

/* Jaton products */
product JATON EDA 0x5704 Ethernet

/* Jeti products */
product JETI SPC1201 0x04b2 FTDI compatible adapter

/* JMicron products */
product JMICRON JM20336 0x2336 USB to SATA Bridge
product JMICRON JM20337 0x2338 USB to ATA/ATAPI Bridge

/* JVC products */
product JVC GR_DX95 0x000a GR-DX95
product JVC MP_PRX1 0x3008 MP-PRX1 Ethernet

/* JRC products */
product JRC AH_J3001V_J3002V 0x0001 AirH PHONE AH-J3001V/J3002V

/* Kamstrup products */
product KAMSTRUP OPTICALEYE 0x0001 Optical Eye/3-wire
product KAMSTRUP MBUS_250D 0x0005 M-Bus Master MultiPort 250D

/* Kawatsu products */
product KAWATSU MH4000P 0x0003 MiniHub 4000P

/* Keisokugiken Corp. products */
product KEISOKUGIKEN USBDAQ 0x0068 HKS-0200 USBDAQ

/* Kensington products */
product KENSINGTON ORBIT 0x1003 Orbit USB/PS2 trackball
product KENSINGTON TURBOBALL 0x1005 TurboBall

/* Keyspan products */
product KEYSPAN USA28_NF 0x0101 USA-28 serial Adapter (no firmware)
product KEYSPAN USA28X_NF 0x0102 USA-28X serial Adapter (no firmware)
product KEYSPAN USA19_NF 0x0103 USA-19 serial Adapter (no firmware)
product KEYSPAN USA18_NF 0x0104 USA-18 serial Adapter (no firmware)
product KEYSPAN USA18X_NF 0x0105 USA-18X serial Adapter (no firmware)
product KEYSPAN USA19W_NF 0x0106 USA-19W serial Adapter (no firmware)
product KEYSPAN USA19 0x0107 USA-19 serial Adapter
product KEYSPAN USA19W 0x0108 USA-19W serial Adapter
product KEYSPAN USA49W_NF 0x0109 USA-49W serial Adapter (no firmware)
product KEYSPAN USA49W 0x010a USA-49W serial Adapter
product KEYSPAN USA19QI_NF 0x010b USA-19QI serial Adapter (no firmware)
product KEYSPAN USA19QI 0x010c USA-19QI serial Adapter
product KEYSPAN USA19Q_NF 0x010d USA-19Q serial Adapter (no firmware)
product KEYSPAN USA19Q 0x010e USA-19Q serial Adapter
product KEYSPAN USA28 0x010f USA-28 serial Adapter
product KEYSPAN USA28XXB 0x0110 USA-28X/XB serial Adapter
product KEYSPAN USA18 0x0111 USA-18 serial Adapter
product KEYSPAN USA18X 0x0112 USA-18X serial Adapter
product KEYSPAN USA28XB_NF 0x0113 USA-28XB serial Adapter (no firmware)
product KEYSPAN USA28XA_NF 0x0114 USA-28XA serial Adapter (no firmware)
product KEYSPAN USA28XA 0x0115 USA-28XA serial Adapter
product KEYSPAN USA18XA_NF 0x0116 USA-18XA serial Adapter (no firmware)
product KEYSPAN USA18XA 0x0117 USA-18XA serial Adapter
product KEYSPAN USA19QW_NF 0x0118 USA-19QW serial Adapter (no firmware)
product KEYSPAN USA19QW 0x0119 USA-19QW serial Adapter
product KEYSPAN USA19HA 0x0121 USA-19HS serial Adapter
product KEYSPAN UIA10 0x0201 UIA-10 remote control
product KEYSPAN UIA11 0x0202 UIA-11 remote control

/* Kingston products */
product KINGSTON XX1 0x0008 Ethernet
product KINGSTON KNU101TX 0x000a KNU101TX USB Ethernet
product KINGSTON HYPERX3_0 0x162b DT HyperX 3.0

/* Kawasaki LSI products */
product KLSI DUH3E10BT 0x0008 USB Ethernet
product KLSI DUH3E10BTN 0x0009 USB Ethernet

/* Kobil products */
product KOBIL CONV_B1 0x2020 FTDI compatible adapter
product KOBIL CONV_KAAN 0x2021 FTDI compatible adapter

/* Kodak products */
product KODAK DC220 0x0100 Digital Science DC220
product KODAK DC260 0x0110 Digital Science DC260
product KODAK DC265 0x0111 Digital Science DC265
product KODAK DC290 0x0112 Digital Science DC290
product KODAK DC240 0x0120 Digital Science DC240
product KODAK DC280 0x0130 Digital Science DC280

/* Kontron AG products */
product KONTRON DM9601 0x8101 USB Ethernet
product KONTRON JP1082 0x9700 USB Ethernet

/* Konica Corp. products */
product KONICA CAMERA 0x0720 Digital Color Camera

/* KYE products */
product KYE NICHE 0x0001 Niche mouse
product KYE NETSCROLL 0x0003 Genius NetScroll mouse
product KYE FLIGHT2000 0x1004 Flight 2000 joystick
product KYE VIVIDPRO 0x2001 ColorPage Vivid-Pro scanner

/* Kyocera products */
product KYOCERA FINECAM_S3X 0x0100 Finecam S3x
product KYOCERA FINECAM_S4 0x0101 Finecam S4
product KYOCERA FINECAM_S5 0x0103 Finecam S5
product KYOCERA FINECAM_L3 0x0105 Finecam L3
product KYOCERA AHK3001V 0x0203 AH-K3001V
product KYOCERA2 CDMA_MSM_K 0x17da Qualcomm Kyocera CDMA Technologies MSM
product KYOCERA2 KPC680 0x180a Qualcomm Kyocera CDMA Technologies MSM

/* LaCie products */
product LACIE HD 0xa601 Hard Disk
product LACIE CDRW 0xa602 CD R/W

/* Lake Shore Cryotronics products */
product LAKESHORE 121 0x0100 121 Current Source
product LAKESHORE 218A 0x0200 218A Temperature Monitor
product LAKESHORE 219 0x0201 219 Temperature Monitor
product LAKESHORE 233 0x0202 233 Temperature Transmitter
product LAKESHORE 235 0x0203 235 Temperature Transmitter
product LAKESHORE 335 0x0300 335 Temperature Controller
product LAKESHORE 336 0x0301 336 Temperature Controller
product LAKESHORE 350 0x0302 350 Temperature Controller
product LAKESHORE 371 0x0303 371 AC Bridge
product LAKESHORE 411 0x0400 411 Handheld Gaussmeter
product LAKESHORE 425 0x0401 425 Gaussmeter
product LAKESHORE 455A 0x0402 455A DSP Gaussmeter
product LAKESHORE 475A 0x0403 475A DSP Gaussmeter
product LAKESHORE 465 0x0404 465 Gaussmeter
product LAKESHORE 625A 0x0600 625A Magnet PSU
product LAKESHORE 642A 0x0601 642A Magnet PSU
product LAKESHORE 648 0x0602 648 Magnet PSU
product LAKESHORE 737 0x0700 737 VSM Controller
product LAKESHORE 776 0x0701 776 Matrix Switch

/* Larsen and Brusgaard products */
product LARSENBRUSGAARD ALTITRACK 0x0001 FTDI compatible adapter

/* Leadtek products */
product LEADTEK 9531 0x2101 9531 GPS

/* Lenovo products */
product LENOVO GIGALAN 0x304b USB 3.0 Ethernet
product LENOVO ETHERNET 0x7203 USB 2.0 Ethernet

/* Lexar products */
product LEXAR JUMPSHOT 0x0001 jumpSHOT CompactFlash Reader
product LEXAR CF_READER 0xb002 USB CF Reader
product LEXAR JUMPDRIVE 0xa833 USB Jumpdrive Flash Drive

/* Lexmark products */
product LEXMARK S2450 0x0009 Optra S 2450

/* Liebert products */
product LIEBERT POWERSURE_PXT 0xffff PowerSure Personal XT
product LIEBERT2 PSI1000 0x0004 UPS PSI 1000 FW:08

/* Link Instruments Inc. products */
product LINKINSTRUMENTS MSO19 0xf190 Link Instruments MSO-19
product LINKINSTRUMENTS MSO28 0xf280 Link Instruments MSO-28
product LINKINSTRUMENTS MSO28_2 0xf281 Link Instruments MSO-28

/* Linksys products */
product LINKSYS MAUSB2 0x0105 Camedia MAUSB-2
product LINKSYS USB10TX1 0x200c USB10TX
product LINKSYS USB10T 0x2202 USB10T Ethernet
product LINKSYS USB100TX 0x2203 USB100TX Ethernet
product LINKSYS USB100H1 0x2204 USB100H1 Ethernet/HPNA
product LINKSYS USB10TA 0x2206 USB10TA Ethernet
product LINKSYS USB10TX2 0x400b USB10TX
product LINKSYS2 WUSB11 0x2219 WUSB11 Wireless Adapter
product LINKSYS2 USB200M 0x2226 USB 2.0 10/100 Ethernet
product LINKSYS3 WUSB11v28 0x2233 WUSB11 v2.8 Wireless Adapter
product LINKSYS4 USB1000 0x0039 USB1000
product LINKSYS4 WUSB100 0x0070 WUSB100
product LINKSYS4 WUSB600N 0x0071 WUSB600N
product LINKSYS4 WUSB54GCV2 0x0073 WUSB54GC v2
product LINKSYS4 WUSB54GCV3 0x0077 WUSB54GC v3
product LINKSYS4 RT3070 0x0078 RT3070
product LINKSYS4 WUSB600NV2 0x0079 WUSB600N v2

/* Logilink products */
product LOGILINK DUMMY 0x0000 Dummy product
product LOGILINK U2M 0x0101 LogiLink USB MIDI Cable

/* Logitech products */
product LOGITECH M2452 0x0203 M2452 keyboard
product LOGITECH M4848 0x0301 M4848 mouse
product LOGITECH PAGESCAN 0x040f PageScan
product LOGITECH QUICKCAMWEB 0x0801 QuickCam Web
product LOGITECH QUICKCAMPRO 0x0810 QuickCam Pro
product LOGITECH WEBCAMC100 0x0817 Webcam C100
product LOGITECH QUICKCAMEXP 0x0840 QuickCam Express
product LOGITECH QUICKCAM 0x0850 QuickCam
product LOGITECH QUICKCAMPRO3 0x0990 QuickCam Pro 9000
product LOGITECH N43 0xc000 N43
product LOGITECH N48 0xc001 N48 mouse
product LOGITECH MBA47 0xc002 M-BA47 mouse
product LOGITECH WMMOUSE 0xc004 WingMan Gaming Mouse
product LOGITECH BD58 0xc00c BD58 mouse
product LOGITECH UN58A 0xc030 iFeel Mouse
product LOGITECH UN53B 0xc032 iFeel MouseMan
product LOGITECH WMPAD 0xc208 WingMan GamePad Extreme
product LOGITECH WMRPAD 0xc20a WingMan RumblePad
product LOGITECH WMJOY 0xc281 WingMan Force joystick
product LOGITECH BB13 0xc401 USB-PS/2 Trackball
product LOGITECH RK53 0xc501 Cordless mouse
product LOGITECH RB6 0xc503 Cordless keyboard
product LOGITECH MX700 0xc506 Cordless optical mouse
product LOGITECH UNIFYING 0xc52b Logitech Unifying Receiver
product LOGITECH QUICKCAMPRO2 0xd001 QuickCam Pro

/* Logitec Corp. products */
product LOGITEC LDR_H443SU2 0x0033 DVD Multi-plus unit LDR-H443SU2
product LOGITEC LDR_H443U2 0x00b3 DVD Multi-plus unit LDR-H443U2
product LOGITEC LAN_GTJU2A 0x0160 LAN-GTJ/U2A Ethernet
product LOGITEC RT2870_1 0x0162 RT2870
product LOGITEC RT2870_2 0x0163 RT2870
product LOGITEC RT2870_3 0x0164 RT2870
product LOGITEC LANW300NU2 0x0166 LAN-W300N/U2
product LOGITEC LANW150NU2 0x0168 LAN-W150N/U2
product LOGITEC LANW300NU2S 0x0169 LAN-W300N/U2S

/* Longcheer Holdings, Ltd. products */
product LONGCHEER WM66 0x6061 Longcheer WM66 HSDPA
product LONGCHEER W14 0x9603 Mobilcom W14
product LONGCHEER XSSTICK 0x9605 4G Systems XSStick P14
product LONGCHEER DISK 0xf000 Driver disk

/* Lucent products */
product LUCENT EVALKIT 0x1001 USS-720 evaluation kit

/* Luwen products */
product LUWEN EASYDISK 0x0005 EasyDisk

/* Macally products */
product MACALLY MOUSE1 0x0101 mouse

/* Mag-Tek products */
product MAGTEK USBSWIPE 0x0002 USB Mag Stripe Swipe Reader

/* Marvell Technology Group, Ltd. products */
product MARVELL SHEEVAPLUG 0x9e8f SheevaPlug serial interface

/* Matrix Orbital products */
product MATRIXORBITAL FTDI_RANGE_0100 0x0100 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0101 0x0101 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0102 0x0102 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0103 0x0103 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0104 0x0104 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0105 0x0105 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0106 0x0106 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0107 0x0107 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0108 0x0108 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0109 0x0109 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_010A 0x010a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_010B 0x010b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_010C 0x010c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_010D 0x010d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_010E 0x010e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_010F 0x010f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0110 0x0110 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0111 0x0111 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0112 0x0112 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0113 0x0113 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0114 0x0114 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0115 0x0115 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0116 0x0116 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0117 0x0117 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0118 0x0118 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0119 0x0119 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_011A 0x011a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_011B 0x011b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_011C 0x011c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_011D 0x011d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_011E 0x011e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_011F 0x011f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0120 0x0120 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0121 0x0121 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0122 0x0122 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0123 0x0123 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0124 0x0124 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0125 0x0125 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0126 0x0126 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0128 0x0128 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0129 0x0129 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_012A 0x012a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_012B 0x012b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_012D 0x012d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_012E 0x012e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_012F 0x012f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0130 0x0130 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0131 0x0131 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0132 0x0132 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0133 0x0133 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0134 0x0134 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0135 0x0135 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0136 0x0136 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0137 0x0137 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0138 0x0138 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0139 0x0139 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_013A 0x013a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_013B 0x013b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_013C 0x013c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_013D 0x013d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_013E 0x013e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_013F 0x013f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0140 0x0140 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0141 0x0141 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0142 0x0142 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0143 0x0143 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0144 0x0144 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0145 0x0145 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0146 0x0146 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0147 0x0147 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0148 0x0148 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0149 0x0149 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_014A 0x014a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_014B 0x014b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_014C 0x014c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_014D 0x014d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_014E 0x014e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_014F 0x014f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0150 0x0150 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0151 0x0151 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0152 0x0152 FTDI compatible adapter
product MATRIXORBITAL MOUA 0x0153 Matrix Orbital MOU-Axxxx LCD displays
product MATRIXORBITAL FTDI_RANGE_0159 0x0159 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_015A 0x015a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_015B 0x015b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_015C 0x015c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_015D 0x015d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_015E 0x015e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_015F 0x015f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0160 0x0160 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0161 0x0161 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0162 0x0162 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0163 0x0163 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0164 0x0164 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0165 0x0165 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0166 0x0166 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0167 0x0167 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0168 0x0168 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0169 0x0169 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_016A 0x016a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_016B 0x016b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_016C 0x016c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_016D 0x016d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_016E 0x016e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_016F 0x016f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0170 0x0170 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0171 0x0171 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0172 0x0172 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0173 0x0173 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0174 0x0174 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0175 0x0175 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0176 0x0176 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0177 0x0177 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0178 0x0178 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0179 0x0179 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_017A 0x017a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_017B 0x017b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_017C 0x017c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_017D 0x017d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_017E 0x017e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_017F 0x017f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0180 0x0180 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0181 0x0181 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0182 0x0182 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0183 0x0183 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0184 0x0184 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0185 0x0185 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0186 0x0186 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0187 0x0187 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0188 0x0188 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0189 0x0189 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_018A 0x018a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_018B 0x018b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_018C 0x018c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_018D 0x018d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_018E 0x018e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_018F 0x018f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0190 0x0190 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0191 0x0191 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0192 0x0192 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0193 0x0193 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0194 0x0194 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0195 0x0195 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0196 0x0196 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0197 0x0197 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0198 0x0198 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_0199 0x0199 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_019A 0x019a FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_019B 0x019b FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_019C 0x019c FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_019D 0x019d FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_019E 0x019e FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_019F 0x019f FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A0 0x01a0 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A1 0x01a1 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A2 0x01a2 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A3 0x01a3 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A4 0x01a4 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A5 0x01a5 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A6 0x01a6 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A7 0x01a7 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A8 0x01a8 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01A9 0x01a9 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01AA 0x01aa FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01AB 0x01ab FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01AC 0x01ac FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01AD 0x01ad FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01AE 0x01ae FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01AF 0x01af FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B0 0x01b0 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B1 0x01b1 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B2 0x01b2 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B3 0x01b3 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B4 0x01b4 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B5 0x01b5 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B6 0x01b6 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B7 0x01b7 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B8 0x01b8 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01B9 0x01b9 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01BA 0x01ba FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01BB 0x01bb FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01BC 0x01bc FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01BD 0x01bd FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01BE 0x01be FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01BF 0x01bf FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C0 0x01c0 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C1 0x01c1 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C2 0x01c2 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C3 0x01c3 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C4 0x01c4 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C5 0x01c5 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C6 0x01c6 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C7 0x01c7 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C8 0x01c8 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01C9 0x01c9 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01CA 0x01ca FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01CB 0x01cb FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01CC 0x01cc FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01CD 0x01cd FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01CE 0x01ce FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01CF 0x01cf FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D0 0x01d0 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D1 0x01d1 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D2 0x01d2 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D3 0x01d3 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D4 0x01d4 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D5 0x01d5 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D6 0x01d6 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D7 0x01d7 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D8 0x01d8 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01D9 0x01d9 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01DA 0x01da FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01DB 0x01db FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01DC 0x01dc FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01DD 0x01dd FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01DE 0x01de FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01DF 0x01df FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E0 0x01e0 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E1 0x01e1 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E2 0x01e2 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E3 0x01e3 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E4 0x01e4 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E5 0x01e5 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E6 0x01e6 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E7 0x01e7 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E8 0x01e8 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01E9 0x01e9 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01EA 0x01ea FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01EB 0x01eb FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01EC 0x01ec FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01ED 0x01ed FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01EE 0x01ee FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01EF 0x01ef FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F0 0x01f0 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F1 0x01f1 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F2 0x01f2 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F3 0x01f3 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F4 0x01f4 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F5 0x01f5 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F6 0x01f6 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F7 0x01f7 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F8 0x01f8 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01F9 0x01f9 FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01FA 0x01fa FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01FB 0x01fb FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01FC 0x01fc FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01FD 0x01fd FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01FE 0x01fe FTDI compatible adapter
product MATRIXORBITAL FTDI_RANGE_01FF 0x01ff FTDI compatible adapter
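The Matrix Orbital block above enumerates the product range 0x0100-0x01ff one ID at a time, with a few gaps (0x0127, 0x012c, and 0x0154-0x0158; 0x0153 is the separately named MOU-A displays). A sketch of matching the same range with a single comparison is shown below, assuming the vendor ID value used (0x1b3d) and the function name, which are introduced only for illustration.

/*
 * Illustrative sketch only.  Matches the FTDI-compatible Matrix
 * Orbital product range above with one range check instead of ~250
 * individual table entries; the vendor id value is an assumption.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VENDOR_MATRIXORBITAL	0x1b3d	/* assumed for this example */

static bool
matrixorbital_ftdi_range(uint16_t vendor, uint16_t product)
{
	return (vendor == VENDOR_MATRIXORBITAL &&
	    product >= 0x0100 && product <= 0x01ff);
}

int
main(void)
{
	printf("%d\n", matrixorbital_ftdi_range(VENDOR_MATRIXORBITAL, 0x01a7));
	return (0);
}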
/* MCT Corp. */
product MCT HUB0100 0x0100 Hub
product MCT DU_H3SP_USB232 0x0200 D-Link DU-H3SP USB BAY Hub
product MCT USB232 0x0210 USB-232 Interface
product MCT SITECOM_USB232 0x0230 Sitecom USB-232 Products

/* Medeli */
product MEDELI DD305 0x5011 DD305 Digital Drum Set

/* MediaTek, Inc. */
product MEDIATEK MTK3329 0x3329 MTK II GPS Receiver

/* Meizu Electronics */
product MEIZU M6_SL 0x0140 MiniPlayer M6 (SL)

/* Melco, Inc products */
product MELCO LUATX1 0x0001 LUA-TX Ethernet
product MELCO LUATX5 0x0005 LUA-TX Ethernet
product MELCO LUA2TX5 0x0009 LUA2-TX Ethernet
product MELCO LUAKTX 0x0012 LUA-KTX Ethernet
product MELCO DUBPXXG 0x001c DUB-PxxG
product MELCO LUAU2KTX 0x003d LUA-U2-KTX Ethernet
product MELCO KG54YB 0x005e WLI-U2-KG54-YB WLAN
product MELCO KG54 0x0066 WLI-U2-KG54 WLAN
product MELCO KG54AI 0x0067 WLI-U2-KG54-AI WLAN
product MELCO LUA3U2AGT 0x006e LUA3-U2-AGT
product MELCO NINWIFI 0x008b Nintendo Wi-Fi
product MELCO PCOPRS1 0x00b3 PC-OP-RS1 RemoteStation
product MELCO SG54HP 0x00d8 WLI-U2-SG54HP
product MELCO G54HP 0x00d9 WLI-U2-G54HP
product MELCO KG54L 0x00da WLI-U2-KG54L
product MELCO WLIUCG300N 0x00e8 WLI-UC-G300N
product MELCO SG54HG 0x00f4 WLI-U2-SG54HG
product MELCO WLRUCG 0x0116 WLR-UC-G
product MELCO WLRUCGAOSS 0x0119 WLR-UC-G-AOSS
product MELCO WLIUCAG300N 0x012e WLI-UC-AG300N
product MELCO WLIUCG 0x0137 WLI-UC-G
product MELCO WLIUCG300HP 0x0148 WLI-UC-G300HP
product MELCO RT2870_2 0x0150 RT2870
product MELCO WLIUCGN 0x015d WLI-UC-GN
product MELCO WLIUCG301N 0x016f WLI-UC-G301N
product MELCO WLIUCGNM 0x01a2 WLI-UC-GNM
product MELCO WLIUCG300HPV1 0x01a8 WLI-UC-G300HP-V1
product MELCO WLIUCGNM2 0x01ee WLI-UC-GNM2
product MELCO WIU2433DM 0x0242 WI-U2-433DM
product MELCO WIU3866D 0x025d WI-U3-866D

/* Merlin products */
product MERLIN V620 0x1110 Merlin V620

/* MetaGeek products */
product METAGEEK TELLSTICK 0x0c30 FTDI compatible adapter
product METAGEEK WISPY1B 0x083e MetaGeek Wi-Spy
product METAGEEK WISPY24X 0x083f MetaGeek Wi-Spy 2.4x
product METAGEEK2 WISPYDBX 0x5000 MetaGeek Wi-Spy DBx

/* Metricom products */
product METRICOM RICOCHET_GS 0x0001 Ricochet GS

/* MGE UPS Systems */
product MGE UPS1 0x0001 MGE UPS SYSTEMS PROTECTIONCENTER 1
product MGE UPS2 0xffff MGE UPS SYSTEMS PROTECTIONCENTER 2

/* MEI products */
product MEI CASHFLOW_SC 0x1100 Cashflow-SC Cash Acceptor
product MEI S2000 0x1101 Series 2000 Combo Acceptor

/* Microdia / Sonix Technology Co., Ltd. products */
product CHICONY2 YUREX 0x1010 YUREX
product CHICONY2 CAM_1 0x62c0 CAM_1
product CHICONY2 TEMPER 0x7401 TEMPer sensor

/* Micro Star International products */
product MSI BT_DONGLE 0x1967 Bluetooth USB dongle
product MSI RT3070_1 0x3820 RT3070
product MSI RT3070_2 0x3821 RT3070
product MSI RT3070_8 0x3822 RT3070
product MSI RT3070_3 0x3870 RT3070
product MSI RT3070_9 0x3871 RT3070
product MSI UB11B 0x6823 UB11B
product MSI RT2570 0x6861 RT2570
product MSI RT2570_2 0x6865 RT2570
product MSI RT2570_3 0x6869 RT2570
product MSI RT2573_1 0x6874 RT2573
product MSI RT2573_2 0x6877 RT2573
product MSI RT3070_4 0x6899 RT3070
product MSI RT3070_5 0x821a RT3070
product MSI RT3070_10 0x822a RT3070
product MSI RT3070_6 0x870a RT3070
product MSI RT3070_11 0x871a RT3070
product MSI RT3070_7 0x899a RT3070
product MSI RT2573_3 0xa861 RT2573
product MSI RT2573_4 0xa874 RT2573

/* Micron products */
product MICRON REALSSD 0x0655 Real SSD eUSB

/* Microsoft products */
product MICROSOFT SIDEPREC 0x0008 SideWinder Precision Pro
product MICROSOFT INTELLIMOUSE 0x0009 IntelliMouse
product MICROSOFT NATURALKBD 0x000b Natural Keyboard Elite
product MICROSOFT DDS80 0x0014 Digital Sound System 80
product MICROSOFT SIDEWINDER 0x001a Sidewinder Precision Racing Wheel
product MICROSOFT INETPRO 0x001c Internet Keyboard Pro
product MICROSOFT TBEXPLORER 0x0024 Trackball Explorer
product MICROSOFT INTELLIEYE 0x0025 IntelliEye mouse
product MICROSOFT INETPRO2 0x002b Internet Keyboard Pro
product MICROSOFT INTELLIMOUSE5 0x0039 IntelliMouse 1.1 5-Button Mouse
product MICROSOFT WHEELMOUSE 0x0040 Wheel Mouse Optical
product MICROSOFT MN510 0x006e MN510 Wireless
product MICROSOFT 700WX 0x0079 Palm 700WX
product MICROSOFT MN110 0x007a 10/100 USB NIC
product MICROSOFT WLINTELLIMOUSE 0x008c Wireless Optical IntelliMouse
product MICROSOFT WLNOTEBOOK 0x00b9 Wireless Optical Mouse (Model 1023)
product MICROSOFT COMFORT3000 0x00d1 Comfort Optical Mouse 3000 (Model 1043)
product MICROSOFT WLNOTEBOOK3 0x00d2 Wireless Optical Mouse 3000 (Model 1049)
product MICROSOFT NATURAL4000 0x00db Natural Ergonomic Keyboard 4000
product MICROSOFT WLNOTEBOOK2 0x00e1 Wireless Optical Mouse 3000 (Model 1056)
product MICROSOFT XBOX360 0x0292 XBOX 360 WLAN

/* Microtech products */
product MICROTECH SCSIDB25 0x0004 USB-SCSI-DB25
product MICROTECH SCSIHD50 0x0005 USB-SCSI-HD50
product MICROTECH DPCM 0x0006 USB CameraMate
product MICROTECH FREECOM 0xfc01 Freecom USB-IDE

/* Microtek products */
product MICROTEK 336CX 0x0094 Phantom 336CX - C3 scanner
product MICROTEK X6U 0x0099 ScanMaker X6 - X6U
product MICROTEK C6 0x009a Phantom C6 scanner
product MICROTEK 336CX2 0x00a0 Phantom 336CX - C3 scanner
product MICROTEK V6USL 0x00a3 ScanMaker V6USL
product MICROTEK V6USL2 0x80a3 ScanMaker V6USL
product MICROTEK V6UL 0x80ac ScanMaker V6UL

/* Microtune, Inc. products */
product MICROTUNE BT_DONGLE 0x1000 Bluetooth USB dongle

/* Midiman products */
product MAUDIO MIDISPORT2X2 0x1001 Midisport 2x2
product MAUDIO FASTTRACKULTRA 0x2080 Fast Track Ultra
product MAUDIO FASTTRACKULTRA8R 0x2081 Fast Track Ultra 8R

/* MindsAtWork products */
product MINDSATWORK WALLET 0x0001 Digital Wallet

/* Minolta Co., Ltd. */
product MINOLTA 2300 0x4001 Dimage 2300
product MINOLTA S304 0x4007 Dimage S304
product MINOLTA X 0x4009 Dimage X
product MINOLTA 5400 0x400e Dimage 5400
product MINOLTA F300 0x4011 Dimage F300
product MINOLTA E223 0x4017 Dimage E223

/* Mitsumi products */
product MITSUMI CDRRW 0x0000 CD-R/RW Drive
product MITSUMI BT_DONGLE 0x641f Bluetooth USB dongle
product MITSUMI FDD 0x6901 USB FDD

/* Mobile Action products */
product MOBILEACTION MA620 0x0620 MA-620 Infrared Adapter

/* Mobility products */
product MOBILITY USB_SERIAL 0x0202 FTDI compatible adapter
product MOBILITY EA 0x0204 Ethernet
product MOBILITY EASIDOCK 0x0304 EasiDock Ethernet

/* MosChip products */
product MOSCHIP MCS7703 0x7703 MCS7703 Serial Port Adapter
product MOSCHIP MCS7730 0x7730 MCS7730 Ethernet
product MOSCHIP MCS7820 0x7820 MCS7820 Serial Port Adapter
product MOSCHIP MCS7830 0x7830 MCS7830 Ethernet
product MOSCHIP MCS7832 0x7832 MCS7832 Ethernet
product MOSCHIP MCS7840 0x7840 MCS7840 Serial Port Adapter

/* Motorola products */
product MOTOROLA MC141555 0x1555 MC141555 hub controller
product MOTOROLA SB4100 0x4100 SB4100 USB Cable Modem
product MOTOROLA2 T720C 0x2822 T720c
product MOTOROLA2 A41XV32X 0x2a22 A41x/V32x Mobile Phones
product MOTOROLA2 E398 0x4810 E398 Mobile Phone
product MOTOROLA2 USBLAN 0x600c USBLAN
product MOTOROLA2 USBLAN2 0x6027 USBLAN
product MOTOROLA2 MB886 0x710f MB886 Mobile Phone (Atrix HD)
product MOTOROLA4 RT2770 0x9031 RT2770
product MOTOROLA4 RT3070 0x9032 RT3070

/* MpMan products */
product MPMAN MPF400_2 0x25a8 MPF400 Music Player 2Go
product MPMAN MPF400_1 0x36d0 MPF400 Music Player 1Go

/* MultiTech products */
product MULTITECH ATLAS 0xf101 MT5634ZBA-USB modem

/* Mustek products */
product MUSTEK 1200CU 0x0001 1200 CU scanner
product MUSTEK 600CU 0x0002 600 CU scanner
product MUSTEK 1200USB 0x0003 1200 USB scanner
product MUSTEK 1200UB 0x0006 1200 UB scanner
product MUSTEK 1200USBPLUS 0x0007 1200 USB Plus scanner
product MUSTEK 1200CUPLUS 0x0008 1200 CU Plus scanner
product MUSTEK BEARPAW1200F 0x0010 BearPaw 1200F scanner
product MUSTEK BEARPAW2400TA 0x0218 BearPaw 2400TA scanner
product MUSTEK BEARPAW1200TA 0x021e BearPaw 1200TA scanner
product MUSTEK 600USB 0x0873 600 USB scanner
product MUSTEK MDC800 0xa800 MDC-800 digital camera

/* M-Systems products */
product MSYSTEMS DISKONKEY 0x0010 DiskOnKey
product MSYSTEMS DISKONKEY2 0x0011 DiskOnKey

/* Myson products */
product MYSON HEDEN_8813 0x8813 USB-IDE
product MYSON HEDEN 0x8818 USB-IDE
product MYSON HUBREADER 0x8819 COMBO Card reader with USB HUB
product MYSON STARREADER 0x9920 USB flash card adapter

/* National Semiconductor */
product NATIONAL BEARPAW1200 0x1000 BearPaw 1200
product NATIONAL BEARPAW2400 0x1001 BearPaw 2400

/* NEC products */
product NEC HUB_0050 0x0050 USB 2.0 7-Port Hub
product NEC HUB_005A 0x005a USB 2.0 4-Port Hub
product NEC WL300NUG 0x0249 WL300NU-G
product NEC WL900U 0x0408 Aterm WL900U
product NEC HUB 0x55aa hub
product NEC HUB_B 0x55ab hub

/* NEODIO products */
product NEODIO ND3260 0x3260 8-in-1 Multi-format Flash Controller
product NEODIO ND5010 0x5010 Multi-format Flash Controller

/* Neotel products */
product NEOTEL PRIME 0x4000 Prime USB modem

/* Netac products */
product NETAC CF_CARD 0x1060 USB-CF-Card
product NETAC ONLYDISK 0x0003 OnlyDisk

/* NetChip Technology products */
product NETCHIP TURBOCONNECT 0x1080 Turbo-Connect
product NETCHIP CLIK_40 0xa140 USB Clik! 40
product NETCHIP GADGETZERO 0xa4a0 Linux Gadget Zero
product NETCHIP ETHERNETGADGET 0xa4a2 Linux Ethernet/RNDIS gadget on pxa210/25x/26x
product NETCHIP POCKETBOOK 0xa4a5 PocketBook

/* Netgear products */
product NETGEAR EA101 0x1001 Ethernet
product NETGEAR EA101X 0x1002 Ethernet
product NETGEAR FA101 0x1020 Ethernet 10/100, USB1.1
product NETGEAR FA120 0x1040 USB 2.0 Ethernet
product NETGEAR M4100 0x1100 M4100/M5300/M7100 series switch
product NETGEAR WG111V1_2 0x4240 PrismGT USB 2.0 WLAN
product NETGEAR WG111V3 0x4260 WG111v3
product NETGEAR WG111U 0x4300 WG111U
product NETGEAR WG111U_NF 0x4301 WG111U (no firmware)
product NETGEAR WG111V2 0x6a00 WG111V2
product NETGEAR WN111V2 0x9001 WN111V2
product NETGEAR WNDA3100 0x9010 WNDA3100
product NETGEAR WNDA4100 0x9012 WNDA4100
product NETGEAR WNDA3200 0x9018 WNDA3200
product NETGEAR RTL8192CU 0x9021 RTL8192CU
product NETGEAR WNA1000 0x9040 WNA1000
product NETGEAR WNA1000M 0x9041 WNA1000M
product NETGEAR A6100 0x9052 A6100
product NETGEAR2 MA101 0x4100 MA101
product NETGEAR2 MA101B 0x4102 MA101 Rev B
product NETGEAR3 WG111T 0x4250 WG111T
product NETGEAR3 WG111T_NF 0x4251 WG111T (no firmware)
product NETGEAR3 WPN111 0x5f00 WPN111
product NETGEAR3 WPN111_NF 0x5f01 WPN111 (no firmware)
product NETGEAR3 WPN111_2 0x5f02 WPN111
product NETGEAR4 RTL8188CU 0x9041 RTL8188CU

/* NetIndex products */
product NETINDEX WS002IN 0x2001 Willcom WS002IN

/* NEWlink */
product NEWLINK USB2IDEBRIDGE 0x00ff USB 2.0 Hard Drive Enclosure

/* Nikon products */
product NIKON E990 0x0102 Digital Camera E990
product NIKON LS40 0x4000 CoolScan LS40 ED
product NIKON D300 0x041a Digital Camera D300

/* NovaTech products */
product NOVATECH NV902 0x9020 NovaTech NV-902W
product NOVATECH RT2573 0x9021 RT2573
product NOVATECH RTL8188CU 0x9071 RTL8188CU

/* Nokia products */
product NOKIA N958GB 0x0070 Nokia N95 8GB
product NOKIA2 CA42 0x1234 CA-42 cable

/* Novatel Wireless products */
product NOVATEL V640 0x1100 Merlin V620
product NOVATEL CDMA_MODEM 0x1110 Novatel Wireless Merlin CDMA
product NOVATEL V620 0x1110 Merlin V620
product NOVATEL V740 0x1120 Merlin V740
product NOVATEL V720 0x1130 Merlin V720
product NOVATEL U740 0x1400 Merlin U740
product NOVATEL U740_2 0x1410 Merlin U740
product NOVATEL U870 0x1420 Merlin U870
product NOVATEL XU870 0x1430 Merlin XU870
product NOVATEL X950D 0x1450 Merlin X950D
product NOVATEL ES620 0x2100 Expedite ES620
product NOVATEL U720 0x2110 Merlin U720
product NOVATEL E725 0x2120 Expedite E725
product NOVATEL ES620_2 0x2130 Expedite ES620
product NOVATEL EU730 0x2400 Expedite EU730
product NOVATEL EU740 0x2410 Expedite EU740
product NOVATEL EU870D 0x2420 Expedite EU870D
product NOVATEL U727 0x4100 Merlin U727 CDMA
product NOVATEL MC950D 0x4400 Novatel MC950D HSUPA
product NOVATEL ZEROCD 0x5010 Novatel ZeroCD
product NOVATEL MIFI2200V 0x5020 Novatel MiFi 2200 CDMA Virgin Mobile
product NOVATEL ZEROCD2 0x5030 Novatel ZeroCD
product NOVATEL MIFI2200 0x5041 Novatel MiFi 2200 CDMA
product NOVATEL U727_2 0x5100 Merlin U727 CDMA
product NOVATEL U760 0x6000 Novatel U760
product NOVATEL MC760 0x6002 Novatel MC760
product NOVATEL MC990D 0x7001 Novatel MC990D
product NOVATEL MC679 0x7031 Novatel MC679
product NOVATEL MC547 0x7042 Novatel MC547
product NOVATEL2 FLEXPACKGPS 0x0100 NovAtel FlexPack GPS receiver

/* O2Micro products */
product O2MICRO OZ776_HUB 0x7761 OZ776 hub
product O2MICRO OZ776_CCID_SC 0x7772 OZ776 CCID SC Reader

/* Olimex products */
product OLIMEX ARM_USB_OCD 0x0003 FTDI compatible adapter product OLIMEX ARM_USB_OCD_H 0x002b FTDI compatible adapter /* Olympus products */ product OLYMPUS C1 0x0102 C-1 Digital Camera product OLYMPUS C700 0x0105 C-700 Ultra Zoom /* OmniVision Technologies, Inc. products */ product OMNIVISION OV511 0x0511 OV511 Camera product OMNIVISION OV511PLUS 0xa511 OV511+ Camera /* OnSpec Electronic, Inc. */ product ONSPEC SDS_HOTFIND_D 0x0400 SDS-infrared.com Hotfind-D Infrared Camera product ONSPEC MDCFE_B_CF_READER 0xa000 MDCFE-B USB CF Reader product ONSPEC CFMS_RW 0xa001 SIIG/Datafab Memory Stick+CF Reader/Writer product ONSPEC READER 0xa003 Datafab-based Reader product ONSPEC CFSM_READER 0xa005 PNY/Datafab CF+SM Reader product ONSPEC CFSM_READER2 0xa006 Simple Tech/Datafab CF+SM Reader product ONSPEC MDSM_B_READER 0xa103 MDSM-B reader product ONSPEC CFSM_COMBO 0xa109 USB to CF + SM Combo (LC1) product ONSPEC UCF100 0xa400 FlashLink UCF-100 CompactFlash Reader product ONSPEC2 IMAGEMATE_SDDR55 0xa103 ImageMate SDDR55 /* Option products */ product OPTION VODAFONEMC3G 0x5000 Vodafone Mobile Connect 3G datacard product OPTION GT3G 0x6000 GlobeTrotter 3G datacard product OPTION GT3GQUAD 0x6300 GlobeTrotter 3G QUAD datacard product OPTION GT3GPLUS 0x6600 GlobeTrotter 3G+ datacard product OPTION GTICON322 0xd033 GlobeTrotter Icon322 storage product OPTION GTMAX36 0x6701 GlobeTrotter Max 3.6 Modem product OPTION GTMAX72 0x6711 GlobeTrotter Max 7.2 HSDPA product OPTION GTHSDPA 0x6971 GlobeTrotter HSDPA product OPTION GTMAXHSUPA 0x7001 GlobeTrotter HSUPA product OPTION GTMAXHSUPAE 0x6901 GlobeTrotter HSUPA PCIe product OPTION GTMAX380HSUPAE 0x7211 GlobeTrotter 380HSUPA PCIe product OPTION GT3G_1 0x6050 3G modem product OPTION GT3G_2 0x6100 3G modem product OPTION GT3G_3 0x6150 3G modem product OPTION GT3G_4 0x6200 3G modem product OPTION GT3G_5 0x6250 3G modem product OPTION GT3G_6 0x6350 3G modem product OPTION E6500 0x6500 3G modem product OPTION E6501 0x6501 3G modem product OPTION E6601 0x6601 3G modem product OPTION E6721 0x6721 3G modem product OPTION E6741 0x6741 3G modem product OPTION E6761 0x6761 3G modem product OPTION E6800 0x6800 3G modem product OPTION E7021 0x7021 3G modem product OPTION E7041 0x7041 3G modem product OPTION E7061 0x7061 3G modem product OPTION E7100 0x7100 3G modem product OPTION GTM380 0x7201 3G modem product OPTION GE40X 0x7601 Globetrotter HSUPA product OPTION GSICON72 0x6911 GlobeSurfer iCON product OPTION GSICONHSUPA 0x7251 Globetrotter HSUPA product OPTION ICON401 0x7401 GlobeSurfer iCON 401 product OPTION GTHSUPA 0x7011 Globetrotter HSUPA product OPTION GMT382 0x7501 Globetrotter HSUPA product OPTION GE40X_1 0x7301 Globetrotter HSUPA product OPTION GE40X_2 0x7361 Globetrotter HSUPA product OPTION GE40X_3 0x7381 Globetrotter HSUPA product OPTION GTM661W 0x9000 GTM661W product OPTION ICONEDGE 0xc031 GlobeSurfer iCON EDGE product OPTION MODHSXPA 0xd013 Globetrotter HSUPA product OPTION ICON321 0xd031 Globetrotter HSUPA product OPTION ICON505 0xd055 Globetrotter iCON 505 product OPTION ICON452 0x7901 Globetrotter iCON 452 /* Optoelectronics Co., Ltd */ product OPTO BARCODE 0x0001 Barcode Reader product OPTO OPTICONCODE 0x0009 Opticon Code Reader product OPTO BARCODE_1 0xa002 Barcode Reader product OPTO CRD7734 0xc000 USB Cradle CRD-7734-RU product OPTO CRD7734_1 0xc001 USB Cradle CRD-7734-RU /* OvisLink product */ product OVISLINK RT3072 0x3072 RT3072 /* OQO */ product OQO WIFI01 0x0002 model 01 WiFi interface product OQO BT01 0x0003 model 01 Bluetooth interface product 
OQO ETHER01PLUS 0x7720 model 01+ Ethernet
product OQO ETHER01 0x8150 model 01 Ethernet interface
/* Ours Technology Inc. */
product OTI DKU5 0x6858 DKU-5 Serial
/* Owen.ru products */
product OWEN AC4 0x0004 AC4 USB-RS485 converter
/* OWL products */
product OWL CM_160 0xca05 OWL CM-160 power monitor
/* Palm Computing, Inc. product */
product PALM SERIAL 0x0080 USB Serial
product PALM M500 0x0001 Palm m500
product PALM M505 0x0002 Palm m505
product PALM M515 0x0003 Palm m515
product PALM I705 0x0020 Palm i705
product PALM TUNGSTEN_Z 0x0031 Palm Tungsten Z
product PALM M125 0x0040 Palm m125
product PALM M130 0x0050 Palm m130
product PALM TUNGSTEN_T 0x0060 Palm Tungsten T
product PALM ZIRE31 0x0061 Palm Zire 31
product PALM ZIRE 0x0070 Palm Zire
/* Panasonic products */
product PANASONIC LS120CAM 0x0901 LS-120 Camera
product PANASONIC KXL840AN 0x0d01 CD-R Drive KXL-840AN
product PANASONIC KXLRW32AN 0x0d09 CD-R Drive KXL-RW32AN
product PANASONIC KXLCB20AN 0x0d0a CD-R Drive KXL-CB20AN
product PANASONIC KXLCB35AN 0x0d0e DVD-ROM & CD-R/RW
product PANASONIC SDCAAE 0x1b00 MultiMediaCard
product PANASONIC TYTP50P6S 0x3900 TY-TP50P6-S 50in Touch Panel
/* Papouch products */
product PAPOUCH AD4USB 0x8003 FTDI compatible adapter
product PAPOUCH AP485 0x0101 FTDI compatible adapter
product PAPOUCH AP485_2 0x0104 FTDI compatible adapter
product PAPOUCH DRAK5 0x0700 FTDI compatible adapter
product PAPOUCH DRAK6 0x1000 FTDI compatible adapter
product PAPOUCH GMSR 0x8005 FTDI compatible adapter
product PAPOUCH GMUX 0x8004 FTDI compatible adapter
product PAPOUCH IRAMP 0x0500 FTDI compatible adapter
product PAPOUCH LEC 0x0300 FTDI compatible adapter
product PAPOUCH MU 0x8001 FTDI compatible adapter
product PAPOUCH QUIDO10X1 0x0b00 FTDI compatible adapter
product PAPOUCH QUIDO2X16 0x0e00 FTDI compatible adapter
product PAPOUCH QUIDO2X2 0x0a00 FTDI compatible adapter
product PAPOUCH QUIDO30X3 0x0c00 FTDI compatible adapter
product PAPOUCH QUIDO3X32 0x0f00 FTDI compatible adapter
product PAPOUCH QUIDO4X4 0x0900 FTDI compatible adapter
product PAPOUCH QUIDO60X3 0x0d00 FTDI compatible adapter
product PAPOUCH QUIDO8X8 0x0800 FTDI compatible adapter
product PAPOUCH SB232 0x0301 FTDI compatible adapter
product PAPOUCH SB422 0x0102 FTDI compatible adapter
product PAPOUCH SB422_2 0x0105 FTDI compatible adapter
product PAPOUCH SB485 0x0100 FTDI compatible adapter
product PAPOUCH SB485C 0x0107 FTDI compatible adapter
product PAPOUCH SB485S 0x0106 FTDI compatible adapter
product PAPOUCH SB485_2 0x0103 FTDI compatible adapter
product PAPOUCH SIMUKEY 0x8002 FTDI compatible adapter
product PAPOUCH TMU 0x0400 FTDI compatible adapter
product PAPOUCH UPSUSB 0x8000 FTDI compatible adapter
/* PARA Industrial products */
product PARA RT3070 0x8888 RT3070
/* Simtec Electronics products */
product SIMTEC ENTROPYKEY 0x0001 Entropy Key
/* Pegatron products */
product PEGATRON RT2870 0x0002 RT2870
product PEGATRON RT3070 0x000c RT3070
product PEGATRON RT3070_2 0x000e RT3070
product PEGATRON RT3070_3 0x0010 RT3070
/* Peracom products */
product PERACOM SERIAL1 0x0001 Serial
product PERACOM ENET 0x0002 Ethernet
product PERACOM ENET3 0x0003 At Home Ethernet
product PERACOM ENET2 0x0005 Ethernet
/* Philips products */
product PHILIPS DSS350 0x0101 DSS 350 Digital Speaker System
product PHILIPS DSS 0x0104 DSS XXX Digital Speaker System
product PHILIPS HUB 0x0201 hub
product PHILIPS PCA646VC 0x0303 PCA646VC PC Camera
product PHILIPS PCVC680K 0x0308 PCVC680K Vesta Pro PC Camera
product PHILIPS DSS150 0x0471 DSS 150 Digital Speaker System
product PHILIPS ACE1001 0x066a AKTAKOM ACE-1001 cable product PHILIPS SPE3030CC 0x083a USB 2.0 External Disk product PHILIPS SNU5600 0x1236 SNU5600 product PHILIPS UM10016 0x1552 ISP 1581 Hi-Speed USB MPEG2 Encoder Reference Kit product PHILIPS DIVAUSB 0x1801 DIVA USB mp3 player product PHILIPS RT2870 0x200f RT2870 /* Philips Semiconductor products */ product PHILIPSSEMI HUB1122 0x1122 HUB /* Megatec */ product MEGATEC UPS 0x5161 Phoenixtec protocol based UPS /* P.I. Engineering products */ product PIENGINEERING PS2USB 0x020b PS2 to Mac USB Adapter /* Planex Communications products */ product PLANEX GW_US11H 0x14ea GW-US11H WLAN product PLANEX2 RTL8188CUS 0x1201 RTL8188CUS product PLANEX2 GW_US11S 0x3220 GW-US11S WLAN product PLANEX2 GW_US54GXS 0x5303 GW-US54GXS WLAN product PLANEX2 GW_US300 0x5304 GW-US300 product PLANEX2 RTL8188CU_1 0xab2a RTL8188CU product PLANEX2 RTL8188CU_2 0xed17 RTL8188CU product PLANEX2 RTL8188CU_3 0x4902 RTL8188CU product PLANEX2 RTL8188CU_4 0xab2e RTL8188CU product PLANEX2 RTL8192CU 0xab2b RTL8192CU product PLANEX2 GWUS54HP 0xab01 GW-US54HP product PLANEX2 GWUS300MINIS 0xab24 GW-US300MiniS product PLANEX2 RT3070 0xab25 RT3070 product PLANEX2 MZKUE150N 0xab2f MZK-UE150N product PLANEX2 GW900D 0xab30 GW-900D product PLANEX2 GWUS54MINI2 0xab50 GW-US54Mini2 product PLANEX2 GWUS54SG 0xc002 GW-US54SG product PLANEX2 GWUS54GZL 0xc007 GW-US54GZL product PLANEX2 GWUS54GD 0xed01 GW-US54GD product PLANEX2 GWUSMM 0xed02 GW-USMM product PLANEX2 RT2870 0xed06 RT2870 product PLANEX2 GWUSMICRON 0xed14 GW-USMicroN product PLANEX2 GWUSVALUEEZ 0xed17 GW-USValue-EZ product PLANEX3 GWUS54GZ 0xab10 GW-US54GZ product PLANEX3 GU1000T 0xab11 GU-1000T product PLANEX3 GWUS54MINI 0xab13 GW-US54Mini product PLANEX2 GWUSNANO 0xab28 GW-USNano /* Plextor Corp. 
*/ product PLEXTOR 40_12_40U 0x0011 PlexWriter 40/12/40U /* PLX products */ product PLX TESTBOARD 0x9060 test board product PLX CA42 0xac70 CA-42 /* PNY products */ product PNY ATTACHE2 0x0010 USB 2.0 Flash Drive /* PortGear products */ product PORTGEAR EA8 0x0008 Ethernet product PORTGEAR EA9 0x0009 Ethernet /* Portsmith products */ product PORTSMITH EEA 0x3003 Express Ethernet /* Posiflex products */ product POSIFLEX PP7000 0x0300 FTDI compatible adapter /* Primax products */ product PRIMAX G2X300 0x0300 G2-200 scanner product PRIMAX G2E300 0x0301 G2E-300 scanner product PRIMAX G2300 0x0302 G2-300 scanner product PRIMAX G2E3002 0x0303 G2E-300 scanner product PRIMAX 9600 0x0340 Colorado USB 9600 scanner product PRIMAX 600U 0x0341 Colorado 600u scanner product PRIMAX 6200 0x0345 Visioneer 6200 scanner product PRIMAX 19200 0x0360 Colorado USB 19200 scanner product PRIMAX 1200U 0x0361 Colorado 1200u scanner product PRIMAX G600 0x0380 G2-600 scanner product PRIMAX 636I 0x0381 ReadyScan 636i product PRIMAX G2600 0x0382 G2-600 scanner product PRIMAX G2E600 0x0383 G2E-600 scanner product PRIMAX COMFORT 0x4d01 Comfort product PRIMAX MOUSEINABOX 0x4d02 Mouse-in-a-Box product PRIMAX PCGAUMS1 0x4d04 Sony PCGA-UMS1 product PRIMAX HP_RH304AA 0x4d17 HP RH304AA mouse /* Prolific products */ product PROLIFIC PL2301 0x0000 PL2301 Host-Host interface product PROLIFIC PL2302 0x0001 PL2302 Host-Host interface product PROLIFIC MOTOROLA 0x0307 Motorola Cable product PROLIFIC RSAQ2 0x04bb PL2303 Serial (IODATA USB-RSAQ2) product PROLIFIC ALLTRONIX_GPRS 0x0609 Alltronix ACM003U00 modem product PROLIFIC ALDIGA_AL11U 0x0611 AlDiga AL-11U modem product PROLIFIC MICROMAX_610U 0x0612 Micromax 610U product PROLIFIC DCU11 0x1234 DCU-11 Phone Cable product PROLIFIC UIC_MSR206 0x206a UIC MSR206 Card Reader product PROLIFIC PL2303 0x2303 PL2303 Serial (ATEN/IOGEAR UC232A) product PROLIFIC PL2305 0x2305 Parallel printer product PROLIFIC ATAPI4 0x2307 ATAPI-4 Controller product PROLIFIC PL2501 0x2501 PL2501 Host-Host interface product PROLIFIC PL2506 0x2506 PL2506 USB to IDE Bridge product PROLIFIC HCR331 0x331a HCR331 Hybrid Card Reader product PROLIFIC PHAROS 0xaaa0 Prolific Pharos product PROLIFIC RSAQ3 0xaaa2 PL2303 Serial Adapter (IODATA USB-RSAQ3) product PROLIFIC2 PL2303 0x2303 PL2303 Serial Adapter /* Putercom products */ product PUTERCOM UPA100 0x047e USB-1284 BRIDGE /* Qcom products */ product QCOM RT2573 0x6196 RT2573 product QCOM RT2573_2 0x6229 RT2573 product QCOM RT2573_3 0x6238 RT2573 product QCOM RT2870 0x6259 RT2870 /* QI-hardware */ product QIHARDWARE JTAGSERIAL 0x0713 FTDI compatible adapter /* Qisda products */ product QISDA H21_1 0x4512 3G modem product QISDA H21_2 0x4523 3G modem product QISDA H20_1 0x4515 3G modem product QISDA H20_2 0x4519 3G modem /* Qualcomm products */ product QUALCOMM CDMA_MSM 0x6000 CDMA Technologies MSM phone product QUALCOMM NTT_L02C_MODEM 0x618f NTT DOCOMO L-02C product QUALCOMM NTT_L02C_STORAGE 0x61dd NTT DOCOMO L-02C product QUALCOMM2 MF330 0x6613 MF330 product QUALCOMM2 RWT_FCT 0x3100 RWT FCT-CDMA 2000 1xRTT modem product QUALCOMM2 CDMA_MSM 0x3196 CDMA Technologies MSM modem product QUALCOMM2 AC8700 0x6000 AC8700 product QUALCOMM2 VW110L 0x1000 Vertex Wireless 110L modem product QUALCOMM2 SIM5218 0x9000 SIM5218 product QUALCOMM2 WM620 0x9002 Neoway WM620 product QUALCOMM2 GOBI2000_QDL 0x9204 Qualcomm Gobi 2000 QDL product QUALCOMM2 GOBI2000 0x9205 Qualcomm Gobi 2000 modem product QUALCOMM2 VT80N 0x6500 Venus VT80N product QUALCOMM3 VFAST2 0x9909 Venus Fast2 modem product 
QUALCOMMINC CDMA_MSM 0x0001 CDMA Technologies MSM modem product QUALCOMMINC E0002 0x0002 3G modem product QUALCOMMINC E0003 0x0003 3G modem product QUALCOMMINC E0004 0x0004 3G modem product QUALCOMMINC E0005 0x0005 3G modem product QUALCOMMINC E0006 0x0006 3G modem product QUALCOMMINC E0007 0x0007 3G modem product QUALCOMMINC E0008 0x0008 3G modem product QUALCOMMINC E0009 0x0009 3G modem product QUALCOMMINC E000A 0x000a 3G modem product QUALCOMMINC E000B 0x000b 3G modem product QUALCOMMINC E000C 0x000c 3G modem product QUALCOMMINC E000D 0x000d 3G modem product QUALCOMMINC E000E 0x000e 3G modem product QUALCOMMINC E000F 0x000f 3G modem product QUALCOMMINC E0010 0x0010 3G modem product QUALCOMMINC E0011 0x0011 3G modem product QUALCOMMINC E0012 0x0012 3G modem product QUALCOMMINC E0013 0x0013 3G modem product QUALCOMMINC E0014 0x0014 3G modem product QUALCOMMINC MF628 0x0015 3G modem product QUALCOMMINC MF633R 0x0016 ZTE WCDMA modem product QUALCOMMINC E0017 0x0017 3G modem product QUALCOMMINC E0018 0x0018 3G modem product QUALCOMMINC E0019 0x0019 3G modem product QUALCOMMINC E0020 0x0020 3G modem product QUALCOMMINC E0021 0x0021 3G modem product QUALCOMMINC E0022 0x0022 3G modem product QUALCOMMINC E0023 0x0023 3G modem product QUALCOMMINC E0024 0x0024 3G modem product QUALCOMMINC E0025 0x0025 3G modem product QUALCOMMINC E0026 0x0026 3G modem product QUALCOMMINC E0027 0x0027 3G modem product QUALCOMMINC E0028 0x0028 3G modem product QUALCOMMINC E0029 0x0029 3G modem product QUALCOMMINC E0030 0x0030 3G modem product QUALCOMMINC MF626 0x0031 3G modem product QUALCOMMINC E0032 0x0032 3G modem product QUALCOMMINC E0033 0x0033 3G modem product QUALCOMMINC E0037 0x0037 3G modem product QUALCOMMINC E0039 0x0039 3G modem product QUALCOMMINC E0042 0x0042 3G modem product QUALCOMMINC E0043 0x0043 3G modem product QUALCOMMINC E0048 0x0048 3G modem product QUALCOMMINC E0049 0x0049 3G modem product QUALCOMMINC E0051 0x0051 3G modem product QUALCOMMINC E0052 0x0052 3G modem product QUALCOMMINC ZTE_STOR2 0x0053 USB ZTE Storage product QUALCOMMINC E0054 0x0054 3G modem product QUALCOMMINC E0055 0x0055 3G modem product QUALCOMMINC E0057 0x0057 3G modem product QUALCOMMINC E0058 0x0058 3G modem product QUALCOMMINC E0059 0x0059 3G modem product QUALCOMMINC E0060 0x0060 3G modem product QUALCOMMINC E0061 0x0061 3G modem product QUALCOMMINC E0062 0x0062 3G modem product QUALCOMMINC E0063 0x0063 3G modem product QUALCOMMINC E0064 0x0064 3G modem product QUALCOMMINC E0066 0x0066 3G modem product QUALCOMMINC E0069 0x0069 3G modem product QUALCOMMINC E0070 0x0070 3G modem product QUALCOMMINC E0073 0x0073 3G modem product QUALCOMMINC E0076 0x0076 3G modem product QUALCOMMINC E0078 0x0078 3G modem product QUALCOMMINC E0082 0x0082 3G modem product QUALCOMMINC E0086 0x0086 3G modem product QUALCOMMINC MF112 0x0103 3G modem product QUALCOMMINC SURFSTICK 0x0117 1&1 Surf Stick product QUALCOMMINC K3772_Z_INIT 0x1179 K3772-Z Initial product QUALCOMMINC K3772_Z 0x1181 K3772-Z product QUALCOMMINC ZTE_MF730M 0x1420 3G modem product QUALCOMMINC MF195E_INIT 0x1514 MF195E initial product QUALCOMMINC MF195E 0x1516 MF195E product QUALCOMMINC ZTE_STOR 0x2000 USB ZTE Storage product QUALCOMMINC E2002 0x2002 3G modem product QUALCOMMINC E2003 0x2003 3G modem product QUALCOMMINC AC682 0xffdd CDMA 1xEVDO USB modem product QUALCOMMINC AC682_INIT 0xffde CDMA 1xEVDO USB modem (initial) product QUALCOMMINC AC8710 0xfff1 3G modem product QUALCOMMINC AC2726 0xfff5 3G modem product QUALCOMMINC AC8700 0xfffe CDMA 1xEVDO USB modem /* Quanta 
products */
product QUANTA RW6815_1 0x00ce HP iPAQ rw6815
product QUANTA RT3070 0x0304 RT3070
product QUANTA Q101_STOR 0x1000 USB Q101 Storage
product QUANTA Q101 0xea02 HSDPA modem
product QUANTA Q111 0xea03 HSDPA modem
product QUANTA GLX 0xea04 HSDPA modem
product QUANTA GKE 0xea05 HSDPA modem
product QUANTA GLE 0xea06 HSDPA modem
product QUANTA RW6815R 0xf003 HP iPAQ rw6815 RNDIS
/* Qtronix products */
product QTRONIX 980N 0x2011 Scorpion-980N keyboard
/* Quickshot products */
product QUICKSHOT STRIKEPAD 0x6238 USB StrikePad
/* Radio Shack */
product RADIOSHACK USBCABLE 0x4026 USB to Serial Cable
/* Rainbow Technologies products */
product RAINBOW IKEY2000 0x1200 i-Key 2000
/* Ralink Technology products */
product RALINK RT2570 0x1706 RT2500USB Wireless Adapter
product RALINK RT2070 0x2070 RT2070
product RALINK RT2570_2 0x2570 RT2500USB Wireless Adapter
product RALINK RT2573 0x2573 RT2501USB Wireless Adapter
product RALINK RT2671 0x2671 RT2601USB Wireless Adapter
product RALINK RT2770 0x2770 RT2770
product RALINK RT2870 0x2870 RT2870
product RALINK RT_STOR 0x2878 USB Storage
product RALINK RT3070 0x3070 RT3070
product RALINK RT3071 0x3071 RT3071
product RALINK RT3072 0x3072 RT3072
product RALINK RT3370 0x3370 RT3370
product RALINK RT3572 0x3572 RT3572
product RALINK RT3573 0x3573 RT3573
product RALINK RT5370 0x5370 RT5370
product RALINK RT5572 0x5572 RT5572
product RALINK RT8070 0x8070 RT8070
product RALINK RT2570_3 0x9020 RT2500USB Wireless Adapter
product RALINK RT2573_2 0x9021 RT2501USB Wireless Adapter
/* RATOC Systems products */
product RATOC REXUSB60 0xb000 USB serial adapter REX-USB60
product RATOC REXUSB60F 0xb020 USB serial adapter REX-USB60F
/* Realtek products */
/* Green House and CompUSA OEM this part */
product REALTEK DUMMY 0x0000 Dummy product
product REALTEK USB20CRW 0x0158 USB20CRW Card Reader
product REALTEK RTL8188ETV 0x0179 RTL8188ETV
product REALTEK RTL8188CTV 0x018a RTL8188CTV
+product REALTEK RTL8188RU_2 0x317f RTL8188RU
product REALTEK USBKR100 0x8150 USBKR100 USB Ethernet
product REALTEK RTL8152 0x8152 RTL8152 USB Ethernet
product REALTEK RTL8153 0x8153 RTL8153 USB Ethernet
product REALTEK RTL8188CE_0 0x8170 RTL8188CE
product REALTEK RTL8171 0x8171 RTL8171
product REALTEK RTL8172 0x8172 RTL8172
product REALTEK RTL8173 0x8173 RTL8173
product REALTEK RTL8174 0x8174 RTL8174
product REALTEK RTL8188CU_0 0x8176 RTL8188CU
-product REALTEK RTL8192CU_1 0x8178 RTL8192CU
+product REALTEK RTL8191CU 0x8177 RTL8191CU
+product REALTEK RTL8192CU 0x8178 RTL8192CU
product REALTEK RTL8188EU 0x8179 RTL8188EU
-product REALTEK RTL8188CE_1 0x817e RTL8188CE
product REALTEK RTL8188CU_1 0x817a RTL8188CU
product REALTEK RTL8188CU_2 0x817b RTL8188CU
+product REALTEK RTL8192CE 0x817c RTL8192CE
+product REALTEK RTL8188RU_1 0x817d RTL8188RU
+product REALTEK RTL8188CE_1 0x817e RTL8188CE
+product REALTEK RTL8188RU_3 0x817f RTL8188RU
product REALTEK RTL8187 0x8187 RTL8187 Wireless Adapter
product REALTEK RTL8187B_0 0x8189 RTL8187B Wireless Adapter
+product REALTEK RTL8188CUS 0x818a RTL8188CUS
+product REALTEK RTL8192EU 0x818b RTL8192EU
product REALTEK RTL8188CU_3 0x8191 RTL8188CU
product REALTEK RTL8196EU 0x8196 RTL8196EU
product REALTEK RTL8187B_1 0x8197 RTL8187B Wireless Adapter
product REALTEK RTL8187B_2 0x8198 RTL8187B Wireless Adapter
-product REALTEK RTL8188CUS 0x818a RTL8188CUS
-product REALTEK RTL8188CU_COMBO 0x8754 RTL8188CU
-product REALTEK RTL8191CU 0x8177 RTL8191CU
-product REALTEK RTL8192CU 0x8178 RTL8192CU
-product REALTEK RTL8192CE 0x817c RTL8192CE
-product REALTEK RTL8188RU_1 0x817d RTL8188RU
-product REALTEK RTL8188RU_3 0x817f RTL8188RU
product REALTEK RTL8712 0x8712 RTL8712
-product REALTEK RTL8713 0x8712 RTL8713
-product REALTEK RTL8188RU_2 0x317f RTL8188RU
+product REALTEK RTL8713 0x8713 RTL8713
+product REALTEK RTL8188CU_COMBO 0x8754 RTL8188CU
+product REALTEK RTL8723BU 0xb720 RTL8723BU
product REALTEK RTL8192SU 0xc512 RTL8192SU
/* RedOctane products */
product REDOCTANE DUMMY 0x0000 Dummy product
product REDOCTANE GHMIDI 0x474b GH MIDI INTERFACE
/* Renesas products */
product RENESAS RX610 0x0053 RX610 RX-Stick
/* Ricoh products */
product RICOH VGPVCC2 0x1830 VGP-VCC2 Camera
product RICOH VGPVCC3 0x1832 VGP-VCC3 Camera
product RICOH VGPVCC2_2 0x1833 VGP-VCC2 Camera
product RICOH VGPVCC2_3 0x1834 VGP-VCC2 Camera
product RICOH VGPVCC7 0x183a VGP-VCC7 Camera
product RICOH VGPVCC8 0x183b VGP-VCC8 Camera
/* Reiner-SCT products */
product REINERSCT CYBERJACK_ECOM 0x0100 e-com cyberJack
/* Roland products */
product ROLAND UA100 0x0000 UA-100 Audio I/F
product ROLAND UM4 0x0002 UM-4 MIDI I/F
product ROLAND SC8850 0x0003 SC-8850 MIDI Synth
product ROLAND U8 0x0004 U-8 Audio I/F
product ROLAND UM2 0x0005 UM-2 MIDI I/F
product ROLAND SC8820 0x0007 SC-8820 MIDI Synth
product ROLAND PC300 0x0008 PC-300 MIDI Keyboard
product ROLAND UM1 0x0009 UM-1 MIDI I/F
product ROLAND SK500 0x000b SK-500 MIDI Keyboard
product ROLAND SCD70 0x000c SC-D70 MIDI Synth
product ROLAND UM880N 0x0014 EDIROL UM-880 MIDI I/F (native)
product ROLAND UM880G 0x0015 EDIROL UM-880 MIDI I/F (generic)
product ROLAND SD90 0x0016 SD-90 MIDI Synth
product ROLAND UM550 0x0023 UM-550 MIDI I/F
product ROLAND SD20 0x0027 SD-20 MIDI Synth
product ROLAND SD80 0x0029 SD-80 MIDI Synth
product ROLAND UA700 0x002b UA-700 Audio I/F
/* Rockfire products */
product ROCKFIRE GAMEPAD 0x2033 gamepad 203USB
/* RATOC Systems products */
product RATOC REXUSB60 0xb000 REX-USB60
product RATOC REXUSB60F 0xb020 REX-USB60F
/* RT system products */
product RTSYSTEMS CT29B 0x9e54 FTDI compatible adapter
product RTSYSTEMS SERIAL_VX7 0x9e52 FTDI compatible adapter
/* Sagem products */
product SAGEM USBSERIAL 0x0027 USB-Serial Controller
product SAGEM XG760A 0x004a XG-760A
product SAGEM XG76NA 0x0062 XG-76NA
/* Samsung products */
product SAMSUNG WIS09ABGN 0x2018 WIS09ABGN Wireless LAN adapter
product SAMSUNG ML6060 0x3008 ML-6060 laser printer
product SAMSUNG YP_U2 0x5050 YP-U2 MP3 Player
product SAMSUNG YP_U4 0x5092 YP-U4 MP3 Player
product SAMSUNG I500 0x6601 I500 Palm USB Phone
product SAMSUNG I330 0x8001 I330 phone cradle
product SAMSUNG2 RT2870_1 0x2018 RT2870
/* Samsung Techwin products */
product SAMSUNG_TECHWIN DIGIMAX_410 0x000a Digimax 410
/* SanDisk products */
product SANDISK SDDR05A 0x0001 ImageMate SDDR-05a
product SANDISK SDDR31 0x0002 ImageMate SDDR-31
product SANDISK SDDR05 0x0005 ImageMate SDDR-05
product SANDISK SDDR12 0x0100 ImageMate SDDR-12
product SANDISK SDDR09 0x0200 ImageMate SDDR-09
product SANDISK SDDR75 0x0810 ImageMate SDDR-75
product SANDISK SDCZ2_128 0x7100 Cruzer Mini 128MB
product SANDISK SDCZ2_256 0x7104 Cruzer Mini 256MB
product SANDISK SDCZ4_128 0x7112 Cruzer Micro 128MB
product SANDISK SDCZ4_256 0x7113 Cruzer Micro 256MB
product SANDISK IMAGEMATE_SDDR289 0xb6ba ImageMate SDDR-289
/* Sanwa Electric Instrument Co., Ltd.
products */ product SANWA KB_USB2 0x0701 KB-USB2 multimeter cable /* Sanyo Electric products */ product SANYO SCP4900 0x0701 Sanyo SCP-4900 USB Phone /* ScanLogic products */ product SCANLOGIC SL11R 0x0002 SL11R IDE Adapter product SCANLOGIC 336CX 0x0300 Phantom 336CX - C3 scanner /* Schweitzer Engineering Laboratories products */ product SEL C662 0x0001 C662 Cable /* Sealevel products */ product SEALEVEL 2101 0x2101 FTDI compatible adapter product SEALEVEL 2102 0x2102 FTDI compatible adapter product SEALEVEL 2103 0x2103 FTDI compatible adapter product SEALEVEL 2104 0x2104 FTDI compatible adapter product SEALEVEL 2106 0x9020 FTDI compatible adapter product SEALEVEL 2201_1 0x2211 FTDI compatible adapter product SEALEVEL 2201_2 0x2221 FTDI compatible adapter product SEALEVEL 2202_1 0x2212 FTDI compatible adapter product SEALEVEL 2202_2 0x2222 FTDI compatible adapter product SEALEVEL 2203_1 0x2213 FTDI compatible adapter product SEALEVEL 2203_2 0x2223 FTDI compatible adapter product SEALEVEL 2401_1 0x2411 FTDI compatible adapter product SEALEVEL 2401_2 0x2421 FTDI compatible adapter product SEALEVEL 2401_3 0x2431 FTDI compatible adapter product SEALEVEL 2401_4 0x2441 FTDI compatible adapter product SEALEVEL 2402_1 0x2412 FTDI compatible adapter product SEALEVEL 2402_2 0x2422 FTDI compatible adapter product SEALEVEL 2402_3 0x2432 FTDI compatible adapter product SEALEVEL 2402_4 0x2442 FTDI compatible adapter product SEALEVEL 2403_1 0x2413 FTDI compatible adapter product SEALEVEL 2403_2 0x2423 FTDI compatible adapter product SEALEVEL 2403_3 0x2433 FTDI compatible adapter product SEALEVEL 2403_4 0x2443 FTDI compatible adapter product SEALEVEL 2801_1 0x2811 FTDI compatible adapter product SEALEVEL 2801_2 0x2821 FTDI compatible adapter product SEALEVEL 2801_3 0x2831 FTDI compatible adapter product SEALEVEL 2801_4 0x2841 FTDI compatible adapter product SEALEVEL 2801_5 0x2851 FTDI compatible adapter product SEALEVEL 2801_6 0x2861 FTDI compatible adapter product SEALEVEL 2801_7 0x2871 FTDI compatible adapter product SEALEVEL 2801_8 0x2881 FTDI compatible adapter product SEALEVEL 2802_1 0x2812 FTDI compatible adapter product SEALEVEL 2802_2 0x2822 FTDI compatible adapter product SEALEVEL 2802_3 0x2832 FTDI compatible adapter product SEALEVEL 2802_4 0x2842 FTDI compatible adapter product SEALEVEL 2802_5 0x2852 FTDI compatible adapter product SEALEVEL 2802_6 0x2862 FTDI compatible adapter product SEALEVEL 2802_7 0x2872 FTDI compatible adapter product SEALEVEL 2802_8 0x2882 FTDI compatible adapter product SEALEVEL 2803_1 0x2813 FTDI compatible adapter product SEALEVEL 2803_2 0x2823 FTDI compatible adapter product SEALEVEL 2803_3 0x2833 FTDI compatible adapter product SEALEVEL 2803_4 0x2843 FTDI compatible adapter product SEALEVEL 2803_5 0x2853 FTDI compatible adapter product SEALEVEL 2803_6 0x2863 FTDI compatible adapter product SEALEVEL 2803_7 0x2873 FTDI compatible adapter product SEALEVEL 2803_8 0x2883 FTDI compatible adapter /* Senao products */ product SENAO EUB1200AC 0x0100 EnGenius EUB1200AC product SENAO RT2870_3 0x0605 RT2870 product SENAO RT2870_4 0x0615 RT2870 product SENAO NUB8301 0x2000 NUB-8301 product SENAO RT2870_1 0x9701 RT2870 product SENAO RT2870_2 0x9702 RT2870 product SENAO RT3070 0x9703 RT3070 product SENAO RT3071 0x9705 RT3071 product SENAO RT3072_1 0x9706 RT3072 product SENAO RT3072_2 0x9707 RT3072 product SENAO RT3072_3 0x9708 RT3072 product SENAO RT3072_4 0x9709 RT3072 product SENAO RT3072_5 0x9801 RT3072 product SENAO RTL8192SU_1 0x9603 RTL8192SU product SENAO RTL8192SU_2 0x9605 
RTL8192SU /* ShanTou products */ product SHANTOU ST268 0x0268 ST268 product SHANTOU DM9601 0x9601 DM 9601 product SHANTOU ADM8515 0x8515 ADM8515 /* Shark products */ product SHARK PA 0x0400 Pocket Adapter /* Sharp products */ product SHARP SL5500 0x8004 Zaurus SL-5500 PDA product SHARP SLA300 0x8005 Zaurus SL-A300 PDA product SHARP SL5600 0x8006 Zaurus SL-5600 PDA product SHARP SLC700 0x8007 Zaurus SL-C700 PDA product SHARP SLC750 0x9031 Zaurus SL-C750 PDA product SHARP WZERO3ES 0x9123 W-ZERO3 ES Smartphone product SHARP WZERO3ADES 0x91ac Advanced W-ZERO3 ES Smartphone product SHARP WILLCOM03 0x9242 WILLCOM03 /* Shuttle Technology products */ product SHUTTLE EUSB 0x0001 E-USB Bridge product SHUTTLE EUSCSI 0x0002 eUSCSI Bridge product SHUTTLE SDDR09 0x0003 ImageMate SDDR09 product SHUTTLE EUSBCFSM 0x0005 eUSB SmartMedia / CompactFlash Adapter product SHUTTLE ZIOMMC 0x0006 eUSB MultiMediaCard Adapter product SHUTTLE HIFD 0x0007 Sony Hifd product SHUTTLE EUSBATAPI 0x0009 eUSB ATA/ATAPI Adapter product SHUTTLE CF 0x000a eUSB CompactFlash Adapter product SHUTTLE EUSCSI_B 0x000b eUSCSI Bridge product SHUTTLE EUSCSI_C 0x000c eUSCSI Bridge product SHUTTLE CDRW 0x0101 CD-RW Device product SHUTTLE EUSBORCA 0x0325 eUSB ORCA Quad Reader /* Siemens products */ product SIEMENS SPEEDSTREAM 0x1001 SpeedStream product SIEMENS SPEEDSTREAM22 0x1022 SpeedStream 1022 product SIEMENS2 WLL013 0x001b WLL013 product SIEMENS2 ES75 0x0034 GSM module MC35 product SIEMENS2 WL54G 0x3c06 54g USB Network Adapter product SIEMENS3 SX1 0x0001 SX1 product SIEMENS3 X65 0x0003 X65 product SIEMENS3 X75 0x0004 X75 product SIEMENS3 EF81 0x0005 EF81 /* Sierra Wireless products */ product SIERRA EM5625 0x0017 EM5625 product SIERRA MC5720_2 0x0018 MC5720 product SIERRA MC5725 0x0020 MC5725 product SIERRA AIRCARD580 0x0112 Sierra Wireless AirCard 580 product SIERRA AIRCARD595 0x0019 Sierra Wireless AirCard 595 product SIERRA AC595U 0x0120 Sierra Wireless AirCard 595U product SIERRA AC597E 0x0021 Sierra Wireless AirCard 597E product SIERRA EM5725 0x0022 EM5725 product SIERRA C597 0x0023 Sierra Wireless Compass 597 product SIERRA MC5727 0x0024 MC5727 product SIERRA T598 0x0025 T598 product SIERRA T11 0x0026 T11 product SIERRA AC402 0x0027 AC402 product SIERRA MC5728 0x0028 MC5728 product SIERRA E0029 0x0029 E0029 product SIERRA AIRCARD580 0x0112 Sierra Wireless AirCard 580 product SIERRA AC595U 0x0120 Sierra Wireless AirCard 595U product SIERRA MC5720 0x0218 MC5720 Wireless Modem product SIERRA MINI5725 0x0220 Sierra Wireless miniPCI 5275 product SIERRA MC5727_2 0x0224 MC5727 product SIERRA MC8755_2 0x6802 MC8755 product SIERRA MC8765 0x6803 MC8765 product SIERRA MC8755 0x6804 MC8755 product SIERRA MC8765_2 0x6805 MC8765 product SIERRA MC8755_4 0x6808 MC8755 product SIERRA MC8765_3 0x6809 MC8765 product SIERRA AC875U 0x6812 AC875U HSDPA USB Modem product SIERRA MC8755_3 0x6813 MC8755 HSDPA product SIERRA MC8775_2 0x6815 MC8775 product SIERRA MC8775 0x6816 MC8775 product SIERRA AC875 0x6820 Sierra Wireless AirCard 875 product SIERRA AC875U_2 0x6821 AC875U product SIERRA AC875E 0x6822 AC875E product SIERRA MC8780 0x6832 MC8780 product SIERRA MC8781 0x6833 MC8781 product SIERRA MC8780_2 0x6834 MC8780 product SIERRA MC8781_2 0x6835 MC8781 product SIERRA MC8780_3 0x6838 MC8780 product SIERRA MC8781_3 0x6839 MC8781 product SIERRA MC8785 0x683A MC8785 product SIERRA MC8785_2 0x683B MC8785 product SIERRA MC8790 0x683C MC8790 product SIERRA MC8791 0x683D MC8791 product SIERRA MC8792 0x683E MC8792 product SIERRA AC880 0x6850 Sierra Wireless 
AirCard 880 product SIERRA AC881 0x6851 Sierra Wireless AirCard 881 product SIERRA AC880E 0x6852 Sierra Wireless AirCard 880E product SIERRA AC881E 0x6853 Sierra Wireless AirCard 881E product SIERRA AC880U 0x6855 Sierra Wireless AirCard 880U product SIERRA AC881U 0x6856 Sierra Wireless AirCard 881U product SIERRA AC885E 0x6859 AC885E product SIERRA AC885E_2 0x685A AC885E product SIERRA AC885U 0x6880 Sierra Wireless AirCard 885U product SIERRA C888 0x6890 C888 product SIERRA C22 0x6891 C22 product SIERRA E6892 0x6892 E6892 product SIERRA E6893 0x6893 E6893 product SIERRA MC8700 0x68A3 MC8700 product SIERRA MC7354 0x68C0 MC7354 product SIERRA MC7355 0x9041 MC7355 product SIERRA MC7430 0x9071 Sierra Wireless MC7430 Qualcomm Snapdragon X7 LTE-A product SIERRA AC313U 0x68aa Sierra Wireless AirCard 313U product SIERRA TRUINSTALL 0x0fff Aircard Tru Installer /* Sigmatel products */ product SIGMATEL WBT_3052 0x4200 WBT-3052 IrDA/USB Bridge product SIGMATEL I_BEAD100 0x8008 i-Bead 100 MP3 Player /* SIIG products */ /* Also: Omnidirectional Control Technology products */ product SIIG DIGIFILMREADER 0x0004 DigiFilm-Combo Reader product SIIG WINTERREADER 0x0330 WINTERREADER Reader product SIIG2 DK201 0x0103 FTDI compatible adapter product SIIG2 USBTOETHER 0x0109 USB TO Ethernet product SIIG2 US2308 0x0421 Serial /* Silicom products */ product SILICOM U2E 0x0001 U2E product SILICOM GPE 0x0002 Psion Gold Port Ethernet /* SI Labs */ product SILABS VSTABI 0x0f91 VStabi Controller product SILABS ARKHAM_DS101_M 0x1101 Arkham DS101 Monitor product SILABS ARKHAM_DS101_A 0x1601 Arkham DS101 Adapter product SILABS BSM7DUSB 0x800a SPORTident BSM7-D USB product SILABS POLOLU 0x803b Pololu Serial product SILABS CYGNAL_DEBUG 0x8044 Cygnal Debug Adapter product SILABS SB_PARAMOUNT_ME 0x8043 Software Bisque Paramount ME product SILABS SAEL 0x8053 SA-EL USB product SILABS GSM2228 0x8054 Enfora GSM2228 USB product SILABS ARGUSISP 0x8066 Argussoft ISP product SILABS IMS_USB_RS422 0x806f IMS USB-RS422 product SILABS CRUMB128 0x807a Crumb128 board product SILABS OPTRIS_MSPRO 0x80c4 Optris MSpro LT Thermometer product SILABS DEGREE 0x80ca Degree Controls Inc product SILABS TRACIENT 0x80dd Tracient RFID product SILABS TRAQMATE 0x80ed Track Systems Traqmate product SILABS SUUNTO 0x80f6 Suunto Sports Instrument product SILABS ARYGON_MIFARE 0x8115 Arygon Mifare RFID reader product SILABS BURNSIDE 0x813d Burnside Telecon Deskmobile product SILABS TAMSMASTER 0x813f Tams Master Easy Control product SILABS WMRBATT 0x814a WMR RIGblaster Plug&Play product SILABS WMRRIGBLASTER 0x814a WMR RIGblaster Plug&Play product SILABS WMRRIGTALK 0x814b WMR RIGtalk RT1 product SILABS B_G_H3000 0x8156 B&G H3000 Data Cable product SILABS HELICOM 0x815e Helicomm IP-Link 1220-DVM product SILABS HAMLINKUSB 0x815f Timewave HamLinkUSB product SILABS AVIT_USB_TTL 0x818b AVIT Research USB-TTL product SILABS MJS_TOSLINK 0x819f MJS USB-TOSLINK product SILABS WAVIT 0x81a6 ThinkOptics WavIt product SILABS MULTIPLEX_RC 0x81a9 Multiplex RC adapter product SILABS MSD_DASHHAWK 0x81ac MSD DashHawk product SILABS INSYS_MODEM 0x81ad INSYS Modem product SILABS LIPOWSKY_JTAG 0x81c8 Lipowsky Baby-JTAG product SILABS LIPOWSKY_LIN 0x81e2 Lipowsky Baby-LIN product SILABS AEROCOMM 0x81e7 Aerocomm Radio product SILABS ZEPHYR_BIO 0x81e8 Zephyr Bioharness product SILABS EMS_C1007 0x81f2 EMS C1007 HF RFID controller product SILABS LIPOWSKY_HARP 0x8218 Lipowsky HARP-1 product SILABS C2_EDGE_MODEM 0x822b Commander 2 EDGE(GSM) Modem product SILABS CYGNAL_GPS 0x826b Cygnal 
Fasttrax GPS product SILABS TELEGESIS_ETRX2 0x8293 Telegesis ETRX2USB product SILABS PROCYON_AVS 0x82f9 Procyon AVS product SILABS MC35PU 0x8341 MC35pu product SILABS CYGNAL 0x8382 Cygnal product SILABS AMBER_AMB2560 0x83a8 Amber Wireless AMB2560 product SILABS DEKTEK_DTAPLUS 0x83d8 DekTec DTA Plus VHF/UHF Booster product SILABS KYOCERA_GPS 0x8411 Kyocera GPS product SILABS IRZ_SG10 0x8418 IRZ SG-10 GSM/GPRS Modem product SILABS BEI_VCP 0x846e BEI USB Sensor (VCP) product SILABS BALLUFF_RFID 0x8477 Balluff RFID reader product SILABS AC_SERV_IBUS 0x85ea AC-Services IBUS Interface product SILABS AC_SERV_CIS 0x85eb AC-Services CIS-IBUS product SILABS V_PREON32 0x85f8 Virtenio Preon32 product SILABS AC_SERV_CAN 0x8664 AC-Services CAN Interface product SILABS AC_SERV_OBD 0x8665 AC-Services OBD Interface product SILABS MMB_ZIGBEE 0x88a4 MMB Networks ZigBee product SILABS INGENI_ZIGBEE 0x88a5 Planet Innovation Ingeni ZigBee product SILABS CP2102 0xea60 SILABS USB UART product SILABS CP210X_2 0xea61 CP210x Serial product SILABS CP210X_3 0xea70 CP210x Serial product SILABS CP210X_4 0xea80 CP210x Serial product SILABS INFINITY_MIC 0xea71 Infinity GPS-MIC-1 Radio Monophone product SILABS USBSCOPE50 0xf001 USBscope50 product SILABS USBWAVE12 0xf002 USBwave12 product SILABS USBPULSE100 0xf003 USBpulse100 product SILABS USBCOUNT50 0xf004 USBcount50 product SILABS2 DCU11CLONE 0xaa26 DCU-11 clone product SILABS3 GPRS_MODEM 0xea61 GPRS Modem product SILABS4 100EU_MODEM 0xea61 GPRS Modem 100EU /* Silicon Portals Inc. */ product SILICONPORTALS YAPPH_NF 0x0200 YAP Phone (no firmware) product SILICONPORTALS YAPPHONE 0x0201 YAP Phone /* Sirius Technologies products */ product SIRIUS ROADSTER 0x0001 NetComm Roadster II 56 USB /* Sitecom products */ product SITECOM LN029 0x182d USB 2.0 Ethernet product SITECOM SERIAL 0x2068 USB to serial cable (v2) product SITECOM2 WL022 0x182d WL-022 /* Sitecom Europe products */ product SITECOMEU RT2870_1 0x0017 RT2870 product SITECOMEU WL168V1 0x000d WL-168 v1 product SITECOMEU LN030 0x0021 MCS7830 product SITECOMEU WL168V4 0x0028 WL-168 v4 product SITECOMEU RT2870_2 0x002b RT2870 product SITECOMEU RT2870_3 0x002c RT2870 product SITECOMEU RT2870_4 0x002d RT2870 product SITECOMEU RT2770 0x0039 RT2770 product SITECOMEU RT3070_2 0x003b RT3070 product SITECOMEU RT3070_3 0x003c RT3070 product SITECOMEU RT3070_4 0x003d RT3070 product SITECOMEU RT3070 0x003e RT3070 product SITECOMEU WL608 0x003f WL-608 product SITECOMEU RT3071 0x0040 RT3071 product SITECOMEU RT3072_1 0x0041 RT3072 product SITECOMEU RT3072_2 0x0042 RT3072 product SITECOMEU WL353 0x0045 WL-353 product SITECOMEU RT3072_3 0x0047 RT3072 product SITECOMEU RT3072_4 0x0048 RT3072 product SITECOMEU RT3072_5 0x004a RT3072 product SITECOMEU WL349V1 0x004b WL-349 v1 product SITECOMEU RT3072_6 0x004d RT3072 product SITECOMEU RTL8188CU_1 0x0052 RTL8188CU product SITECOMEU RTL8188CU_2 0x005c RTL8188CU product SITECOMEU RTL8192CU 0x0061 RTL8192CU product SITECOMEU LN032 0x0072 LN-032 product SITECOMEU WLA7100 0x0074 WLA-7100 product SITECOMEU LN031 0x0056 LN-031 product SITECOMEU LN028 0x061c LN-028 product SITECOMEU WL113 0x9071 WL-113 product SITECOMEU ZD1211B 0x9075 ZD1211B product SITECOMEU WL172 0x90ac WL-172 product SITECOMEU WL113R2 0x9712 WL-113 rev 2 /* Skanhex Technology products */ product SKANHEX MD_7425 0x410a MD 7425 Camera product SKANHEX SX_520Z 0x5200 SX 520z Camera /* Smart Technologies products */ product SMART PL2303 0x2303 Serial adapter /* SmartBridges products */ product SMARTBRIDGES SMARTLINK 0x0001 SmartLink 
USB Ethernet product SMARTBRIDGES SMARTNIC 0x0003 smartNIC 2 PnP Ethernet /* SMC products */ product SMC 2102USB 0x0100 10Mbps Ethernet product SMC 2202USB 0x0200 10/100 Ethernet product SMC 2206USB 0x0201 EZ Connect USB Ethernet product SMC 2862WG 0xee13 EZ Connect Wireless Adapter product SMC2 2020HUB 0x2020 USB Hub product SMC2 2514HUB 0x2514 USB Hub product SMC3 2662WUSB 0xa002 2662W-AR Wireless product SMC2 LAN9500_ETH 0x9500 USB/Ethernet product SMC2 LAN9505_ETH 0x9505 USB/Ethernet product SMC2 LAN9530_ETH 0x9530 USB/Ethernet product SMC2 LAN9730_ETH 0x9730 USB/Ethernet product SMC2 LAN9500_SAL10 0x9900 USB/Ethernet product SMC2 LAN9505_SAL10 0x9901 USB/Ethernet product SMC2 LAN9500A_SAL10 0x9902 USB/Ethernet product SMC2 LAN9505A_SAL10 0x9903 USB/Ethernet product SMC2 LAN9514_SAL10 0x9904 USB/Ethernet product SMC2 LAN9500A_HAL 0x9905 USB/Ethernet product SMC2 LAN9505A_HAL 0x9906 USB/Ethernet product SMC2 LAN9500_ETH_2 0x9907 USB/Ethernet product SMC2 LAN9500A_ETH_2 0x9908 USB/Ethernet product SMC2 LAN9514_ETH_2 0x9909 USB/Ethernet product SMC2 LAN9500A_ETH 0x9e00 USB/Ethernet product SMC2 LAN9505A_ETH 0x9e01 USB/Ethernet product SMC2 LAN89530_ETH 0x9e08 USB/Ethernet product SMC2 LAN9514_ETH 0xec00 USB/Ethernet /* SOHOware products */ product SOHOWARE NUB100 0x9100 10/100 USB Ethernet product SOHOWARE NUB110 0x9110 10/100 USB Ethernet /* SOLID YEAR products */ product SOLIDYEAR KEYBOARD 0x2101 Solid Year USB keyboard /* SONY products */ product SONY DSC 0x0010 DSC cameras product SONY MS_NW_MS7 0x0025 Memorystick NW-MS7 product SONY PORTABLE_HDD_V2 0x002b Portable USB Harddrive V2 product SONY MSACUS1 0x002d Memorystick MSAC-US1 product SONY HANDYCAM 0x002e Handycam product SONY MSC 0x0032 MSC memory stick slot product SONY CLIE_35 0x0038 Sony Clie v3.5 product SONY MS_PEG_N760C 0x0058 PEG N760c Memorystick product SONY CLIE_40 0x0066 Sony Clie v4.0 product SONY MS_MSC_U03 0x0069 Memorystick MSC-U03 product SONY CLIE_40_MS 0x006d Sony Clie v4.0 Memory Stick slot product SONY CLIE_S360 0x0095 Sony Clie s360 product SONY CLIE_41_MS 0x0099 Sony Clie v4.1 Memory Stick slot product SONY CLIE_41 0x009a Sony Clie v4.1 product SONY CLIE_NX60 0x00da Sony Clie nx60 product SONY CLIE_TH55 0x0144 Sony Clie th55 product SONY CLIE_TJ37 0x0169 Sony Clie tj37 product SONY RF_RECEIVER 0x01db Sony RF mouse/kbd Receiver VGP-WRC1 product SONY QN3 0x0437 Sony QN3 CMD-Jxx phone cable /* Sony Ericsson products */ product SONYERICSSON DCU10 0x0528 DCU-10 Phone Data Cable product SONYERICSSON DATAPILOT 0x2003 Datapilot Phone Cable /* SOURCENEXT products */ product SOURCENEXT KEIKAI8 0x039f KeikaiDenwa 8 product SOURCENEXT KEIKAI8_CHG 0x012e KeikaiDenwa 8 with charger /* SparkLAN products */ product SPARKLAN RT2573 0x0004 RT2573 product SPARKLAN RT2870_1 0x0006 RT2870 product SPARKLAN RT3070 0x0010 RT3070 /* Soundgraph products */ product SOUNDGRAPH IMON_VFD 0x0044 Antec Veris Elite VFD Panel, Knob, and Remote product SOUNDGRAPH SSTONE_LC16 0xffdc Silverstone LC16 VFD Panel, Knob, and Remote /* Speed Dragon Multimedia products */ product SPEEDDRAGON MS3303H 0x110b MS3303H Serial /* Sphairon Access Systems GmbH products */ product SPHAIRON UB801R 0x0110 UB801R /* Stelera Wireless products */ product STELERA ZEROCD 0x1000 Zerocd Installer product STELERA C105 0x1002 Stelera/Bandrish C105 USB product STELERA E1003 0x1003 3G modem product STELERA E1004 0x1004 3G modem product STELERA E1005 0x1005 3G modem product STELERA E1006 0x1006 3G modem product STELERA E1007 0x1007 3G modem product STELERA E1008 0x1008 3G 
modem product STELERA E1009 0x1009 3G modem product STELERA E100A 0x100a 3G modem product STELERA E100B 0x100b 3G modem product STELERA E100C 0x100c 3G modem product STELERA E100D 0x100d 3G modem product STELERA E100E 0x100e 3G modem product STELERA E100F 0x100f 3G modem product STELERA E1010 0x1010 3G modem product STELERA E1011 0x1011 3G modem product STELERA E1012 0x1012 3G modem /* STMicroelectronics products */ product STMICRO BIOCPU 0x2016 Biometric Coprocessor product STMICRO COMMUNICATOR 0x7554 USB Communicator product STMICRO ST72682 0xfada USB 2.0 Flash drive controller /* STSN products */ product STSN STSN0001 0x0001 Internet Access Device /* SUN Corporation products */ product SUNTAC DS96L 0x0003 SUNTAC U-Cable type D2 product SUNTAC PS64P1 0x0005 SUNTAC U-Cable type P1 product SUNTAC VS10U 0x0009 SUNTAC Slipper U product SUNTAC IS96U 0x000a SUNTAC Ir-Trinity product SUNTAC AS64LX 0x000b SUNTAC U-Cable type A3 product SUNTAC AS144L4 0x0011 SUNTAC U-Cable type A4 /* Sun Microsystems products */ product SUN KEYBOARD_TYPE_6 0x0005 Type 6 USB keyboard product SUN KEYBOARD_TYPE_7 0x00a2 Type 7 USB keyboard /* XXX The above is a North American PC style keyboard possibly */ product SUN MOUSE 0x0100 Type 6 USB mouse product SUN KBD_HUB 0x100e Kbd Hub /* Sunplus Innovation Technology Inc. products */ product SUNPLUS USBMOUSE 0x0007 USB Optical Mouse /* Super Top products */ product SUPERTOP IDE 0x6600 USB-IDE product SUPERTOP FLASHDRIVE 0x121c extrememory Snippy /* Syntech products */ product SYNTECH CPT8001C 0x0001 CPT-8001C Barcode scanner product SYNTECH CYPHERLAB100 0x1000 CipherLab USB Barcode Scanner /* Teclast products */ product TECLAST TLC300 0x3203 USB Media Player /* Testo products */ product TESTO USB_INTERFACE 0x0001 FTDI compatible adapter /* TexTech products */ product TEXTECH DUMMY 0x0000 Dummy product product TEXTECH U2M_1 0x0101 Textech USB MIDI cable product TEXTECH U2M_2 0x1806 Textech USB MIDI cable /* The Mobility Lab products */ product TML USB_SERIAL 0x0064 FTDI compatible adapter /* Thurlby Thandar Instrument products */ product TTI QL355P 0x03e8 FTDI compatible adapter /* Supra products */ product DIAMOND2 SUPRAEXPRESS56K 0x07da Supra Express 56K modem product DIAMOND2 SUPRA2890 0x0b4a SupraMax 2890 56K Modem product DIAMOND2 RIO600USB 0x5001 Rio 600 USB product DIAMOND2 RIO800USB 0x5002 Rio 800 USB /* Surecom Technology products */ product SURECOM EP9001G2A 0x11f2 EP-9001-G rev 2A product SURECOM RT2570 0x11f3 RT2570 product SURECOM RT2573 0x31f3 RT2573 /* Sweex products */ product SWEEX ZD1211 0x1809 ZD1211 product SWEEX2 LW153 0x0153 LW153 product SWEEX2 LW154 0x0154 LW154 product SWEEX2 LW303 0x0302 LW303 product SWEEX2 LW313 0x0313 LW313 /* System TALKS, Inc. 
*/
product SYSTEMTALKS SGCX2UL 0x1920 SGC-X2UL
/* Tapwave products */
product TAPWAVE ZODIAC 0x0100 Zodiac
/* Taugagreining products */
product TAUGA CAMERAMATE 0x0005 CameraMate (DPCM_USB)
/* TCTMobile products */
product TCTMOBILE X060S 0x0000 X060S 3G modem
product TCTMOBILE X080S 0xf000 X080S 3G modem
/* TDK products */
product TDK UPA9664 0x0115 USB-PDC Adapter UPA9664
product TDK UCA1464 0x0116 USB-cdmaOne Adapter UCA1464
product TDK UHA6400 0x0117 USB-PHS Adapter UHA6400
product TDK UPA6400 0x0118 USB-PHS Adapter UPA6400
product TDK BT_DONGLE 0x0309 Bluetooth USB dongle
/* TEAC products */
product TEAC FD05PUB 0x0000 FD-05PUB floppy
/* Tekram Technology products */
product TEKRAM QUICKWLAN 0x1630 QuickWLAN
product TEKRAM ZD1211_1 0x5630 ZD1211
product TEKRAM ZD1211_2 0x6630 ZD1211
/* Telex Communications products */
product TELEX MIC1 0x0001 Enhanced USB Microphone
/* Telit products */
product TELIT UC864E 0x1003 UC864E 3G modem
product TELIT UC864G 0x1004 UC864G 3G modem
/* Ten X Technology, Inc. */
product TENX UAUDIO0 0xf211 USB audio headset
/* Texas Instruments products */
product TI UTUSB41 0x1446 UT-USB41 hub
product TI TUSB2046 0x2046 TUSB2046 hub
/* Thrustmaster products */
product THRUST FUSION_PAD 0xa0a3 Fusion Digital Gamepad
/* TLayTech products */
product TLAYTECH TEU800 0x1682 TEU800 3G modem
/* Topre Corporation products */
product TOPRE HHKB 0x0100 HHKB Professional
/* Toshiba Corporation products */
product TOSHIBA POCKETPC_E740 0x0706 PocketPC e740
product TOSHIBA RT3070 0x0a07 RT3070
product TOSHIBA G450 0x0d45 G450 modem
product TOSHIBA HSDPA 0x1302 G450 modem
product TOSHIBA TRANSMEMORY 0x6545 USB ThumbDrive
/* TP-Link products */
product TPLINK T4U 0x0101 Archer T4U
+product TPLINK WN822NV4 0x0108 TL-WN822N v4
product TPLINK WN823NV2 0x0109 TL-WN823N v2
/* Trek Technology products */
product TREK THUMBDRIVE 0x1111 ThumbDrive
product TREK MEMKEY 0x8888 IBM USB Memory Key
product TREK THUMBDRIVE_8MB 0x9988 ThumbDrive_8MB
/* TRENDnet products */
product TRENDNET RTL8192CU 0x624d RTL8192CU
product TRENDNET TEW646UBH 0x646b TEW-646UBH
product TRENDNET RTL8188CU 0x648b RTL8188CU
product TRENDNET TEW805UB 0x805b TEW-805UB
/* Tripp-Lite products */
product TRIPPLITE U209 0x2008 Serial
/* Trumpion products */
product TRUMPION T33520 0x1001 T33520 USB Flash Card Controller
product TRUMPION C3310 0x1100 Comotron C3310 MP3 player
product TRUMPION MP3 0x1200 MP3 player
/* TwinMOS */
product TWINMOS G240 0xa006 G240
product TWINMOS MDIV 0x1325 Memory Disk IV
/* Ubiquam products */
product UBIQUAM UALL 0x3100 CDMA 1xRTT USB Modem (U-100/105/200/300/520)
/* Ultima products */
product ULTIMA 1200UBPLUS 0x4002 1200 UB Plus scanner
/* UMAX products */
product UMAX ASTRA1236U 0x0002 Astra 1236U Scanner
product UMAX ASTRA1220U 0x0010 Astra 1220U Scanner
product UMAX ASTRA2000U 0x0030 Astra 2000U Scanner
product UMAX ASTRA2100U 0x0130 Astra 2100U Scanner
product UMAX ASTRA2200U 0x0230 Astra 2200U Scanner
product UMAX ASTRA3400 0x0060 Astra 3400 Scanner
/* U-MEDIA Communications products */
product UMEDIA TEW444UBEU 0x3006 TEW-444UB EU
product UMEDIA TEW444UBEU_NF 0x3007 TEW-444UB EU (no firmware)
product UMEDIA TEW429UB_A 0x300a TEW-429UB_A
product UMEDIA TEW429UB 0x300b TEW-429UB
product UMEDIA TEW429UBC1 0x300d TEW-429UB C1
product UMEDIA RT2870_1 0x300e RT2870
product UMEDIA ALL0298V2 0x3204 ALL0298 v2
product UMEDIA AR5523_2 0x3205 AR5523
product UMEDIA AR5523_2_NF 0x3206 AR5523 (no firmware)
/* Universal Access products */
product UNIACCESS PANACHE 0x0101 Panache Surf USB ISDN
Adapter /* Unknown products */ product UNKNOWN4 NF_RIC 0x0001 FTDI compatible adapter /* USI products */ product USI MC60 0x10c5 MC60 Serial /* U.S. Robotics products */ product USR USR5422 0x0118 USR5422 WLAN product USR USR5423 0x0121 USR5423 WLAN /* VIA Technologies products */ product VIA USB2IDEBRIDGE 0x6204 USB 2.0 IDE Bridge /* VIA Labs */ product VIALABS USB30SATABRIDGE 0x0700 USB 3.0 SATA Bridge /* Vaisala products */ product VAISALA CABLE 0x0200 USB Interface cable /* Vertex products */ product VERTEX VW110L 0x0100 Vertex VW110L modem /* VidzMedia products */ product VIDZMEDIA MONSTERTV 0x4fb1 MonsterTV P2H /* Vision products */ product VISION VC6452V002 0x0002 CPiA Camera /* Visioneer products */ product VISIONEER 7600 0x0211 OneTouch 7600 product VISIONEER 5300 0x0221 OneTouch 5300 product VISIONEER 3000 0x0224 Scanport 3000 product VISIONEER 6100 0x0231 OneTouch 6100 product VISIONEER 6200 0x0311 OneTouch 6200 product VISIONEER 8100 0x0321 OneTouch 8100 product VISIONEER 8600 0x0331 OneTouch 8600 /* Vivitar products */ product VIVITAR 35XX 0x0003 Vivicam 35Xx /* VTech products */ product VTECH RT2570 0x3012 RT2570 product VTECH ZD1211B 0x3014 ZD1211B /* Wacom products */ product WACOM CT0405U 0x0000 CT-0405-U Tablet product WACOM GRAPHIRE 0x0010 Graphire product WACOM GRAPHIRE3_4X5 0x0013 Graphire 3 4x5 product WACOM INTUOSA5 0x0021 Intuos A5 product WACOM GD0912U 0x0022 Intuos 9x12 Graphics Tablet /* WAGO Kontakttechnik GmbH products */ product WAGO SERVICECABLE 0x07a6 USB Service Cable 750-923 /* WaveSense products */ product WAVESENSE JAZZ 0xaaaa Jazz blood glucose meter /* WCH products */ product WCH CH341SER 0x5523 CH341/CH340 USB-Serial Bridge product WCH2 DUMMY 0x0000 Dummy product product WCH2 CH341SER_2 0x5523 CH341/CH340 USB-Serial Bridge product WCH2 CH341SER 0x7523 CH341/CH340 USB-Serial Bridge product WCH2 U2M 0X752d CH345 USB2.0-MIDI /* West Mountain Radio products */ product WESTMOUNTAIN RIGBLASTER_ADVANTAGE 0x0003 RIGblaster Advantage /* Western Digital products */ product WESTERN COMBO 0x0200 Firewire USB Combo product WESTERN EXTHDD 0x0400 External HDD product WESTERN HUB 0x0500 USB HUB product WESTERN MYBOOK 0x0901 MyBook External HDD product WESTERN MYPASSPORT_00 0x0704 MyPassport External HDD product WESTERN MYPASSPORT_11 0x0741 MyPassport External HDD product WESTERN MYPASSPORT_01 0x0746 MyPassport External HDD product WESTERN MYPASSPORT_02 0x0748 MyPassport External HDD product WESTERN MYPASSPORT_03 0x074A MyPassport External HDD product WESTERN MYPASSPORT_04 0x074C MyPassport External HDD product WESTERN MYPASSPORT_05 0x074E MyPassport External HDD product WESTERN MYPASSPORT_06 0x07A6 MyPassport External HDD product WESTERN MYPASSPORT_07 0x07A8 MyPassport External HDD product WESTERN MYPASSPORT_08 0x07AA MyPassport External HDD product WESTERN MYPASSPORT_09 0x07AC MyPassport External HDD product WESTERN MYPASSPORT_10 0x07AE MyPassport External HDD product WESTERN MYPASSPORTES_00 0x070A MyPassport Essential External HDD product WESTERN MYPASSPORTES_01 0x071A MyPassport Essential External HDD product WESTERN MYPASSPORTES_02 0x0730 MyPassport Essential External HDD product WESTERN MYPASSPORTES_03 0x0732 MyPassport Essential External HDD product WESTERN MYPASSPORTES_04 0x0740 MyPassport Essential External HDD product WESTERN MYPASSPORTES_05 0x0742 MyPassport Essential External HDD product WESTERN MYPASSPORTES_06 0x0750 MyPassport Essential External HDD product WESTERN MYPASSPORTES_07 0x0752 MyPassport Essential External HDD product WESTERN MYPASSPORTES_08 
0x07A0 MyPassport Essential External HDD product WESTERN MYPASSPORTES_09 0x07A2 MyPassport Essential External HDD /* WeTelecom products */ product WETELECOM WM_D200 0x6801 WM-D200 /* WIENER Plein & Baus GmbH products */ product WIENERPLEINBAUS PL512 0x0010 PL512 PSU product WIENERPLEINBAUS RCM 0x0011 RCM Remote Control product WIENERPLEINBAUS MPOD 0x0012 MPOD PSU product WIENERPLEINBAUS CML 0x0015 CML Data Logger /* Windbond Electronics */ product WINBOND UH104 0x5518 4-port USB Hub /* WinMaxGroup products */ product WINMAXGROUP FLASH64MC 0x6660 USB Flash Disk 64M-C /* Wistron NeWeb products */ product WISTRONNEWEB WNC0600 0x0326 WNC-0600USB product WISTRONNEWEB UR045G 0x0427 PrismGT USB 2.0 WLAN product WISTRONNEWEB UR055G 0x0711 UR055G product WISTRONNEWEB O8494 0x0804 ORiNOCO 802.11n product WISTRONNEWEB AR5523_1 0x0826 AR5523 product WISTRONNEWEB AR5523_1_NF 0x0827 AR5523 (no firmware) product WISTRONNEWEB AR5523_2 0x082a AR5523 product WISTRONNEWEB AR5523_2_NF 0x0829 AR5523 (no firmware) /* Xerox products */ product XEROX WCM15 0xffef WorkCenter M15 /* Xirlink products */ product XIRLINK PCCAM 0x8080 IBM PC Camera /* Xyratex products */ product XYRATEX PRISM_GT_1 0x2000 PrismGT USB 2.0 WLAN product XYRATEX PRISM_GT_2 0x2002 PrismGT USB 2.0 WLAN /* Yamaha products */ product YAMAHA UX256 0x1000 UX256 MIDI I/F product YAMAHA UX96 0x1008 UX96 MIDI I/F product YAMAHA RPU200 0x3104 RP-U200 product YAMAHA RTA54I 0x4000 NetVolante RTA54i Broadband&ISDN Router product YAMAHA RTW65B 0x4001 NetVolante RTW65b Broadband Wireless Router product YAMAHA RTW65I 0x4002 NetVolante RTW65i Broadband&ISDN Wireless Router product YAMAHA RTA55I 0x4004 NetVolante RTA55i Broadband VoIP Router /* Yano products */ product YANO U640MO 0x0101 U640MO-03 product YANO FW800HD 0x05fc METALWEAR-HDD /* Y.C. Cable products */ product YCCABLE PL2303 0x0fba PL2303 Serial /* Y-E Data products */ product YEDATA FLASHBUSTERU 0x0000 Flashbuster-U /* Yiso Wireless Co. products */ product YISO C893 0xc893 CDMA 2000 1xEVDO PC Card /* Z-Com products */ product ZCOM M4Y750 0x0001 M4Y-750 product ZCOM XI725 0x0002 XI-725/726 product ZCOM XI735 0x0005 XI-735 product ZCOM XG703A 0x0008 PrismGT USB 2.0 WLAN product ZCOM ZD1211 0x0011 ZD1211 product ZCOM AR5523 0x0012 AR5523 product ZCOM AR5523_NF 0x0013 AR5523 driver (no firmware) product ZCOM XM142 0x0015 XM-142 product ZCOM ZD1211B 0x001a ZD1211B product ZCOM RT2870_1 0x0022 RT2870 product ZCOM UB81 0x0023 UB81 product ZCOM RT2870_2 0x0025 RT2870 product ZCOM UB82 0x0026 UB82 /* Zinwell products */ product ZINWELL RT2570 0x0260 RT2570 product ZINWELL RT2870_1 0x0280 RT2870 product ZINWELL RT2870_2 0x0282 RT2870 product ZINWELL RT3072_1 0x0283 RT3072 product ZINWELL RT3072_2 0x0284 RT3072 product ZINWELL RT3070 0x5257 RT3070 /* Zoom Telephonics, Inc. products */ product ZOOM 2986L 0x9700 2986L Fax modem /* Zoran Microelectronics products */ product ZORAN EX20DSC 0x4343 Digital Camera EX-20 DSC /* Zydas Technology Corporation products */ product ZYDAS ZD1211 0x1211 ZD1211 WLAN abg product ZYDAS ZD1211B 0x1215 ZD1211B product ZYDAS ZD1221 0x1221 ZD1221 /* ZyXEL Communication Co. 
products */ product ZYXEL OMNI56K 0x1500 Omni 56K Plus product ZYXEL 980N 0x2011 Scorpion-980N keyboard product ZYXEL ZYAIRG220 0x3401 ZyAIR G-220 product ZYXEL G200V2 0x3407 G-200 v2 product ZYXEL AG225H 0x3409 AG-225H product ZYXEL M202 0x340a M-202 product ZYXEL G220V2 0x340f G-220 v2 product ZYXEL G202 0x3410 G-202 product ZYXEL RT2870_1 0x3416 RT2870 product ZYXEL NWD271N 0x3417 NWD-271N product ZYXEL NWD211AN 0x3418 NWD-211AN product ZYXEL RT2870_2 0x341a RT2870 product ZYXEL RT3070 0x341e NWD2105 product ZYXEL RTL8192CU 0x341f RTL8192CU product ZYXEL NWD2705 0x3421 NWD2705 product ZYXEL NWD6605 0x3426 NWD6605 Index: projects/clang400-import/sys/kern/kern_switch.c =================================================================== --- projects/clang400-import/sys/kern/kern_switch.c (revision 312719) +++ projects/clang400-import/sys/kern/kern_switch.c (revision 312720) @@ -1,513 +1,528 @@ /*- * Copyright (c) 2001 Jake Burkholder * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* Uncomment this to enable logging of critical_enter/exit. */ #if 0 #define KTR_CRITICAL KTR_SCHED #else #define KTR_CRITICAL 0 #endif #ifdef FULL_PREEMPTION #ifndef PREEMPTION #error "The FULL_PREEMPTION option requires the PREEMPTION option" #endif #endif CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS); /* * kern.sched.preemption allows user space to determine if preemption support * is compiled in or not. It is not currently a boot or runtime flag that * can be changed. */ #ifdef PREEMPTION static int kern_sched_preemption = 1; #else static int kern_sched_preemption = 0; #endif SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD, &kern_sched_preemption, 0, "Kernel preemption enabled"); /* * Support for scheduler stats exported via kern.sched.stats. All stats may * be reset with kern.sched.stats.reset = 1. Stats may be defined elsewhere * with SCHED_STAT_DEFINE(). */ #ifdef SCHED_STATS SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats"); /* Switch reasons from mi_switch(). 
*/ DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]); SCHED_STAT_DEFINE_VAR(uncategorized, &DPCPU_NAME(sched_switch_stats[SWT_NONE]), ""); SCHED_STAT_DEFINE_VAR(preempt, &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), ""); SCHED_STAT_DEFINE_VAR(owepreempt, &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), ""); SCHED_STAT_DEFINE_VAR(turnstile, &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), ""); SCHED_STAT_DEFINE_VAR(sleepq, &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), ""); SCHED_STAT_DEFINE_VAR(sleepqtimo, &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), ""); SCHED_STAT_DEFINE_VAR(relinquish, &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), ""); SCHED_STAT_DEFINE_VAR(needresched, &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), ""); SCHED_STAT_DEFINE_VAR(idle, &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), ""); SCHED_STAT_DEFINE_VAR(iwait, &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), ""); SCHED_STAT_DEFINE_VAR(suspend, &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), ""); SCHED_STAT_DEFINE_VAR(remotepreempt, &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), ""); SCHED_STAT_DEFINE_VAR(remotewakeidle, &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), ""); static int sysctl_stats_reset(SYSCTL_HANDLER_ARGS) { struct sysctl_oid *p; uintptr_t counter; int error; int val; int i; val = 0; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (val == 0) return (0); /* * Traverse the list of children of _kern_sched_stats and reset each * to 0. Skip the reset entry. */ SLIST_FOREACH(p, oidp->oid_parent, oid_link) { if (p == oidp || p->oid_arg1 == NULL) continue; counter = (uintptr_t)p->oid_arg1; CPU_FOREACH(i) { *(long *)(dpcpu_off[i] + counter) = 0; } } return (0); } SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL, 0, sysctl_stats_reset, "I", "Reset scheduler statistics"); #endif /************************************************************************ * Functions that manipulate runnability from a thread perspective. * ************************************************************************/ /* * Select the thread that will be run next. */ struct thread * choosethread(void) { struct thread *td; retry: td = sched_choose(); /* * If we are in panic, only allow system threads, * plus the one we are running in, to be run. */ if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 && (td->td_flags & TDF_INPANIC) == 0)) { /* note that it is no longer on the run queue */ TD_SET_CAN_RUN(td); goto retry; } TD_SET_RUNNING(td); return (td); } /* * Kernel thread preemption implementation. Critical sections mark * regions of code in which preemptions are not allowed. * * It might seem a good idea to inline critical_enter() but, in order * to prevent instructions reordering by the compiler, a __compiler_membar() * would have to be used here (the same as sched_pin()). The performance * penalty imposed by the membar could, then, produce slower code than * the function call itself, for most cases. 
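 *
 * For illustration only (an editorial sketch, not part of this change), a
 * typical consumer of these primitives brackets access to per-CPU state:
 *
 *	critical_enter();
 *	... read or update PCPU data; no preemption, hence no migration ...
 *	critical_exit();
 *
 * Sections may nest; td_critnest tracks the depth, and preemption is
 * reconsidered only when the outermost critical_exit() drops it to zero.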
*/ void critical_enter(void) { struct thread *td; td = curthread; td->td_critnest++; CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td, (long)td->td_proc->p_pid, td->td_name, td->td_critnest); } void critical_exit(void) { struct thread *td; int flags; td = curthread; KASSERT(td->td_critnest != 0, ("critical_exit: td_critnest == 0")); if (td->td_critnest == 1) { td->td_critnest = 0; + + /* + * Interrupt handlers execute critical_exit() on + * leave, and td_owepreempt may be left set by an + * interrupt handler only when td_critnest > 0. If we + * are decrementing td_critnest from 1 to 0, read + * td_owepreempt after decrementing, to not miss the + * preempt. Disallow compiler to reorder operations. + */ + __compiler_membar(); if (td->td_owepreempt && !kdb_active) { + /* + * Microoptimization: we committed to switch, + * disable preemption in interrupt handlers + * while spinning for the thread lock. + */ td->td_critnest = 1; thread_lock(td); td->td_critnest--; flags = SW_INVOL | SW_PREEMPT; if (TD_IS_IDLETHREAD(td)) flags |= SWT_IDLE; else flags |= SWT_OWEPREEMPT; mi_switch(flags, NULL); thread_unlock(td); } } else td->td_critnest--; CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td, (long)td->td_proc->p_pid, td->td_name, td->td_critnest); } /************************************************************************ * SYSTEM RUN QUEUE manipulations and tests * ************************************************************************/ /* * Initialize a run structure. */ void runq_init(struct runq *rq) { int i; bzero(rq, sizeof *rq); for (i = 0; i < RQ_NQS; i++) TAILQ_INIT(&rq->rq_queues[i]); } /* * Clear the status bit of the queue corresponding to priority level pri, * indicating that it is empty. */ static __inline void runq_clrbit(struct runq *rq, int pri) { struct rqbits *rqb; rqb = &rq->rq_status; CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d", rqb->rqb_bits[RQB_WORD(pri)], rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri), RQB_BIT(pri), RQB_WORD(pri)); rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri); } /* * Find the index of the first non-empty run queue. This is done by * scanning the status bits, a set bit indicates a non-empty queue. */ static __inline int runq_findbit(struct runq *rq) { struct rqbits *rqb; int pri; int i; rqb = &rq->rq_status; for (i = 0; i < RQB_LEN; i++) if (rqb->rqb_bits[i]) { pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW); CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d", rqb->rqb_bits[i], i, pri); return (pri); } return (-1); } static __inline int runq_findbit_from(struct runq *rq, u_char pri) { struct rqbits *rqb; rqb_word_t mask; int i; /* * Set the mask for the first word so we ignore priorities before 'pri'. */ mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1)); rqb = &rq->rq_status; again: for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) { mask = rqb->rqb_bits[i] & mask; if (mask == 0) continue; pri = RQB_FFS(mask) + (i << RQB_L2BPW); CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d", mask, i, pri); return (pri); } if (pri == 0) return (-1); /* * Wrap back around to the beginning of the list just once so we * scan the whole thing. */ pri = 0; goto again; } /* * Set the status bit of the queue corresponding to priority level pri, * indicating that it is non-empty. 
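 *
 * Worked example (editorial, assuming RQB_BPW == 32): priority level 40
 * maps to status word RQB_WORD(40) == 1 with mask RQB_BIT(40) == 1 << 8.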
*/ static __inline void runq_setbit(struct runq *rq, int pri) { struct rqbits *rqb; rqb = &rq->rq_status; CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d", rqb->rqb_bits[RQB_WORD(pri)], rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri), RQB_BIT(pri), RQB_WORD(pri)); rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri); } /* * Add the thread to the queue specified by its priority, and set the * corresponding status bit. */ void runq_add(struct runq *rq, struct thread *td, int flags) { struct rqhead *rqh; int pri; pri = td->td_priority / RQ_PPQ; td->td_rqindex = pri; runq_setbit(rq, pri); rqh = &rq->rq_queues[pri]; CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p", td, td->td_priority, pri, rqh); if (flags & SRQ_PREEMPTED) { TAILQ_INSERT_HEAD(rqh, td, td_runq); } else { TAILQ_INSERT_TAIL(rqh, td, td_runq); } } void runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags) { struct rqhead *rqh; KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri)); td->td_rqindex = pri; runq_setbit(rq, pri); rqh = &rq->rq_queues[pri]; CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p", td, td->td_priority, pri, rqh); if (flags & SRQ_PREEMPTED) { TAILQ_INSERT_HEAD(rqh, td, td_runq); } else { TAILQ_INSERT_TAIL(rqh, td, td_runq); } } /* * Return true if there are runnable processes of any priority on the run * queue, false otherwise. Has no side effects, does not modify the run * queue structure. */ int runq_check(struct runq *rq) { struct rqbits *rqb; int i; rqb = &rq->rq_status; for (i = 0; i < RQB_LEN; i++) if (rqb->rqb_bits[i]) { CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d", rqb->rqb_bits[i], i); return (1); } CTR0(KTR_RUNQ, "runq_check: empty"); return (0); } /* * Find the highest priority process on the run queue. */ struct thread * runq_choose_fuzz(struct runq *rq, int fuzz) { struct rqhead *rqh; struct thread *td; int pri; while ((pri = runq_findbit(rq)) != -1) { rqh = &rq->rq_queues[pri]; /* fuzz == 1 is normal.. 0 or less are ignored */ if (fuzz > 1) { /* * In the first couple of entries, check if * there is one for our CPU as a preference. */ int count = fuzz; int cpu = PCPU_GET(cpuid); struct thread *td2; td2 = td = TAILQ_FIRST(rqh); while (count-- && td2) { if (td2->td_lastcpu == cpu) { td = td2; break; } td2 = TAILQ_NEXT(td2, td_runq); } } else td = TAILQ_FIRST(rqh); KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue")); CTR3(KTR_RUNQ, "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh); return (td); } CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri); return (NULL); } /* * Find the highest priority process on the run queue. 
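 * For context (editorial note): runq_add() above files a thread under
 * queue index td_priority / RQ_PPQ, so with RQ_PPQ == 4 priorities
 * 120-123 all share queue 30; the scan below simply returns the head of
 * the first non-empty queue reported by runq_findbit().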
*/ struct thread * runq_choose(struct runq *rq) { struct rqhead *rqh; struct thread *td; int pri; while ((pri = runq_findbit(rq)) != -1) { rqh = &rq->rq_queues[pri]; td = TAILQ_FIRST(rqh); KASSERT(td != NULL, ("runq_choose: no thread on busy queue")); CTR3(KTR_RUNQ, "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh); return (td); } CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri); return (NULL); } struct thread * runq_choose_from(struct runq *rq, u_char idx) { struct rqhead *rqh; struct thread *td; int pri; if ((pri = runq_findbit_from(rq, idx)) != -1) { rqh = &rq->rq_queues[pri]; td = TAILQ_FIRST(rqh); KASSERT(td != NULL, ("runq_choose: no thread on busy queue")); CTR4(KTR_RUNQ, "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p", pri, td, td->td_rqindex, rqh); return (td); } CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri); return (NULL); } /* * Remove the thread from the queue specified by its priority, and clear the * corresponding status bit if the queue becomes empty. * Caller must set state afterwards. */ void runq_remove(struct runq *rq, struct thread *td) { runq_remove_idx(rq, td, NULL); } void runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx) { struct rqhead *rqh; u_char pri; KASSERT(td->td_flags & TDF_INMEM, ("runq_remove_idx: thread swapped out")); pri = td->td_rqindex; KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri)); rqh = &rq->rq_queues[pri]; CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p", td, td->td_priority, pri, rqh); TAILQ_REMOVE(rqh, td, td_runq); if (TAILQ_EMPTY(rqh)) { CTR0(KTR_RUNQ, "runq_remove_idx: empty"); runq_clrbit(rq, pri); if (idx != NULL && *idx == pri) *idx = (pri + 1) % RQ_NQS; } } Index: projects/clang400-import/sys/kern/kern_time.c =================================================================== --- projects/clang400-import/sys/kern/kern_time.c (revision 312719) +++ projects/clang400-import/sys/kern/kern_time.c (revision 312720) @@ -1,1673 +1,1680 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)kern_time.c 8.1 (Berkeley) 6/10/93 */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #define MAX_CLOCKS (CLOCK_MONOTONIC+1) #define CPUCLOCK_BIT 0x80000000 #define CPUCLOCK_PROCESS_BIT 0x40000000 #define CPUCLOCK_ID_MASK (~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT)) #define MAKE_THREAD_CPUCLOCK(tid) (CPUCLOCK_BIT|(tid)) #define MAKE_PROCESS_CPUCLOCK(pid) \ (CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid)) static struct kclock posix_clocks[MAX_CLOCKS]; static uma_zone_t itimer_zone = NULL; /* * Time of day and interval timer support. * * These routines provide the kernel entry points to get and set * the time-of-day and per-process interval timers. Subroutines * here provide support for adding and subtracting timeval structures * and decrementing interval timers, optionally reloading the interval * timers when they expire. */ static int settime(struct thread *, struct timeval *); static void timevalfix(struct timeval *); static void itimer_start(void); static int itimer_init(void *, int, int); static void itimer_fini(void *, int); static void itimer_enter(struct itimer *); static void itimer_leave(struct itimer *); static struct itimer *itimer_find(struct proc *, int); static void itimers_alloc(struct proc *); static void itimers_event_hook_exec(void *arg, struct proc *p, struct image_params *imgp); static void itimers_event_hook_exit(void *arg, struct proc *p); static int realtimer_create(struct itimer *); static int realtimer_gettime(struct itimer *, struct itimerspec *); static int realtimer_settime(struct itimer *, int, struct itimerspec *, struct itimerspec *); static int realtimer_delete(struct itimer *); static void realtimer_clocktime(clockid_t, struct timespec *); static void realtimer_expire(void *); int register_posix_clock(int, struct kclock *); void itimer_fire(struct itimer *it); int itimespecfix(struct timespec *ts); #define CLOCK_CALL(clock, call, arglist) \ ((*posix_clocks[clock].call) arglist) SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL); static int settime(struct thread *td, struct timeval *tv) { struct timeval delta, tv1, tv2; static struct timeval maxtime, laststep; struct timespec ts; microtime(&tv1); delta = *tv; timevalsub(&delta, &tv1); /* * If the system is secure, we do not allow the time to be * set to a value earlier than 1 second less than the highest * time we have yet seen. The worst a miscreant can do in * this circumstance is "freeze" time. He couldn't go * back to the past. * * We similarly do not allow the clock to be stepped more * than one second, nor more than once per second. This allows * a miscreant to make the clock march double-time, but no worse. */ if (securelevel_gt(td->td_ucred, 1) != 0) { if (delta.tv_sec < 0 || delta.tv_usec < 0) { /* * Update maxtime to latest time we've seen. 
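 * Editorial example of the clamping below: at securelevel >= 2, with
 * maxtime at T, a request to step the clock to T - 10 seconds is
 * rewritten to T - 1 before being applied, so a miscreant can never
 * push time more than one second into the past.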
*/ if (tv1.tv_sec > maxtime.tv_sec) maxtime = tv1; tv2 = *tv; timevalsub(&tv2, &maxtime); if (tv2.tv_sec < -1) { tv->tv_sec = maxtime.tv_sec - 1; printf("Time adjustment clamped to -1 second\n"); } } else { if (tv1.tv_sec == laststep.tv_sec) return (EPERM); if (delta.tv_sec > 1) { tv->tv_sec = tv1.tv_sec + 1; printf("Time adjustment clamped to +1 second\n"); } laststep = *tv; } } ts.tv_sec = tv->tv_sec; ts.tv_nsec = tv->tv_usec * 1000; tc_setclock(&ts); resettodr(); return (0); } #ifndef _SYS_SYSPROTO_H_ struct clock_getcpuclockid2_args { id_t id; int which; clockid_t *clock_id; }; #endif /* ARGSUSED */ int sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap) { clockid_t clk_id; int error; error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id); if (error == 0) error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t)); return (error); } int kern_clock_getcpuclockid2(struct thread *td, id_t id, int which, clockid_t *clk_id) { struct proc *p; pid_t pid; lwpid_t tid; int error; switch (which) { case CPUCLOCK_WHICH_PID: if (id != 0) { error = pget(id, PGET_CANSEE | PGET_NOTID, &p); if (error != 0) return (error); PROC_UNLOCK(p); pid = id; } else { pid = td->td_proc->p_pid; } *clk_id = MAKE_PROCESS_CPUCLOCK(pid); return (0); case CPUCLOCK_WHICH_TID: tid = id == 0 ? td->td_tid : id; *clk_id = MAKE_THREAD_CPUCLOCK(tid); return (0); default: return (EINVAL); } } #ifndef _SYS_SYSPROTO_H_ struct clock_gettime_args { clockid_t clock_id; struct timespec *tp; }; #endif /* ARGSUSED */ int sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap) { struct timespec ats; int error; error = kern_clock_gettime(td, uap->clock_id, &ats); if (error == 0) error = copyout(&ats, uap->tp, sizeof(ats)); return (error); } static inline void cputick2timespec(uint64_t runtime, struct timespec *ats) { runtime = cputick2usec(runtime); ats->tv_sec = runtime / 1000000; ats->tv_nsec = runtime % 1000000 * 1000; } static void get_thread_cputime(struct thread *targettd, struct timespec *ats) { uint64_t runtime, curtime, switchtime; if (targettd == NULL) { /* current thread */ critical_enter(); switchtime = PCPU_GET(switchtime); curtime = cpu_ticks(); runtime = curthread->td_runtime; critical_exit(); runtime += curtime - switchtime; } else { thread_lock(targettd); runtime = targettd->td_runtime; thread_unlock(targettd); } cputick2timespec(runtime, ats); } static void get_process_cputime(struct proc *targetp, struct timespec *ats) { uint64_t runtime; struct rusage ru; PROC_STATLOCK(targetp); rufetch(targetp, &ru); runtime = targetp->p_rux.rux_runtime; PROC_STATUNLOCK(targetp); cputick2timespec(runtime, ats); } static int get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats) { struct proc *p, *p2; struct thread *td2; lwpid_t tid; pid_t pid; int error; p = td->td_proc; if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) { tid = clock_id & CPUCLOCK_ID_MASK; td2 = tdfind(tid, p->p_pid); if (td2 == NULL) return (EINVAL); get_thread_cputime(td2, ats); PROC_UNLOCK(td2->td_proc); } else { pid = clock_id & CPUCLOCK_ID_MASK; error = pget(pid, PGET_CANSEE, &p2); if (error != 0) return (EINVAL); get_process_cputime(p2, ats); PROC_UNLOCK(p2); } return (0); } int kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats) { struct timeval sys, user; struct proc *p; p = td->td_proc; switch (clock_id) { case CLOCK_REALTIME: /* Default to precise. 
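 * (Editorial note: CLOCK_REALTIME intentionally falls through to the
 * nanotime() path shared with CLOCK_REALTIME_PRECISE; the _FAST variant
 * below uses getnanotime() instead, trading accuracy for a cheaper read.)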
*/ case CLOCK_REALTIME_PRECISE: nanotime(ats); break; case CLOCK_REALTIME_FAST: getnanotime(ats); break; case CLOCK_VIRTUAL: PROC_LOCK(p); PROC_STATLOCK(p); calcru(p, &user, &sys); PROC_STATUNLOCK(p); PROC_UNLOCK(p); TIMEVAL_TO_TIMESPEC(&user, ats); break; case CLOCK_PROF: PROC_LOCK(p); PROC_STATLOCK(p); calcru(p, &user, &sys); PROC_STATUNLOCK(p); PROC_UNLOCK(p); timevaladd(&user, &sys); TIMEVAL_TO_TIMESPEC(&user, ats); break; case CLOCK_MONOTONIC: /* Default to precise. */ case CLOCK_MONOTONIC_PRECISE: case CLOCK_UPTIME: case CLOCK_UPTIME_PRECISE: nanouptime(ats); break; case CLOCK_UPTIME_FAST: case CLOCK_MONOTONIC_FAST: getnanouptime(ats); break; case CLOCK_SECOND: ats->tv_sec = time_second; ats->tv_nsec = 0; break; case CLOCK_THREAD_CPUTIME_ID: get_thread_cputime(NULL, ats); break; case CLOCK_PROCESS_CPUTIME_ID: PROC_LOCK(p); get_process_cputime(p, ats); PROC_UNLOCK(p); break; default: if ((int)clock_id >= 0) return (EINVAL); return (get_cputime(td, clock_id, ats)); } return (0); } #ifndef _SYS_SYSPROTO_H_ struct clock_settime_args { clockid_t clock_id; const struct timespec *tp; }; #endif /* ARGSUSED */ int sys_clock_settime(struct thread *td, struct clock_settime_args *uap) { struct timespec ats; int error; if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0) return (error); return (kern_clock_settime(td, uap->clock_id, &ats)); } +static int allow_insane_settime = 0; +SYSCTL_INT(_debug, OID_AUTO, allow_insane_settime, CTLFLAG_RWTUN, + &allow_insane_settime, 0, + "do not perform possibly restrictive checks on settime(2) args"); + int kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats) { struct timeval atv; int error; if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0) return (error); if (clock_id != CLOCK_REALTIME) return (EINVAL); if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000 || ats->tv_sec < 0) + return (EINVAL); + if (!allow_insane_settime && ats->tv_sec > 9999ULL * 366 * 24 * 60 * 60) return (EINVAL); /* XXX Don't convert nsec->usec and back */ TIMESPEC_TO_TIMEVAL(&atv, ats); error = settime(td, &atv); return (error); } #ifndef _SYS_SYSPROTO_H_ struct clock_getres_args { clockid_t clock_id; struct timespec *tp; }; #endif int sys_clock_getres(struct thread *td, struct clock_getres_args *uap) { struct timespec ts; int error; if (uap->tp == NULL) return (0); error = kern_clock_getres(td, uap->clock_id, &ts); if (error == 0) error = copyout(&ts, uap->tp, sizeof(ts)); return (error); } int kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts) { ts->tv_sec = 0; switch (clock_id) { case CLOCK_REALTIME: case CLOCK_REALTIME_FAST: case CLOCK_REALTIME_PRECISE: case CLOCK_MONOTONIC: case CLOCK_MONOTONIC_FAST: case CLOCK_MONOTONIC_PRECISE: case CLOCK_UPTIME: case CLOCK_UPTIME_FAST: case CLOCK_UPTIME_PRECISE: /* * Round up the result of the division cheaply by adding 1. * Rounding up is especially important if rounding down * would give 0. Perfect rounding is unimportant. */ ts->tv_nsec = 1000000000 / tc_getfrequency() + 1; break; case CLOCK_VIRTUAL: case CLOCK_PROF: /* Accurately round up here because we can do so cheaply. 
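 * Editorial worked example, assuming hz == 1000: howmany(1000000000, hz)
 * == 1000000, i.e. these tick-driven clocks report a 1 ms resolution.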
*/ ts->tv_nsec = howmany(1000000000, hz); break; case CLOCK_SECOND: ts->tv_sec = 1; ts->tv_nsec = 0; break; case CLOCK_THREAD_CPUTIME_ID: case CLOCK_PROCESS_CPUTIME_ID: cputime: /* sync with cputick2usec */ ts->tv_nsec = 1000000 / cpu_tickrate(); if (ts->tv_nsec == 0) ts->tv_nsec = 1000; break; default: if ((int)clock_id < 0) goto cputime; return (EINVAL); } return (0); } static uint8_t nanowait[MAXCPU]; int kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt) { struct timespec ts; sbintime_t sbt, sbtt, prec, tmp; time_t over; int error; if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000) return (EINVAL); if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0)) return (0); ts = *rqt; if (ts.tv_sec > INT32_MAX / 2) { over = ts.tv_sec - INT32_MAX / 2; ts.tv_sec -= over; } else over = 0; tmp = tstosbt(ts); prec = tmp; prec >>= tc_precexp; if (TIMESEL(&sbt, tmp)) sbt += tc_tick_sbt; sbt += tmp; error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp", sbt, prec, C_ABSOLUTE); if (error != EWOULDBLOCK) { if (error == ERESTART) error = EINTR; TIMESEL(&sbtt, tmp); if (rmt != NULL) { ts = sbttots(sbt - sbtt); ts.tv_sec += over; if (ts.tv_sec < 0) timespecclear(&ts); *rmt = ts; } if (sbtt >= sbt) return (0); return (error); } return (0); } #ifndef _SYS_SYSPROTO_H_ struct nanosleep_args { struct timespec *rqtp; struct timespec *rmtp; }; #endif /* ARGSUSED */ int sys_nanosleep(struct thread *td, struct nanosleep_args *uap) { struct timespec rmt, rqt; int error; error = copyin(uap->rqtp, &rqt, sizeof(rqt)); if (error) return (error); if (uap->rmtp && !useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE)) return (EFAULT); error = kern_nanosleep(td, &rqt, &rmt); if (error && uap->rmtp) { int error2; error2 = copyout(&rmt, uap->rmtp, sizeof(rmt)); if (error2) error = error2; } return (error); } #ifndef _SYS_SYSPROTO_H_ struct gettimeofday_args { struct timeval *tp; struct timezone *tzp; }; #endif /* ARGSUSED */ int sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap) { struct timeval atv; struct timezone rtz; int error = 0; if (uap->tp) { microtime(&atv); error = copyout(&atv, uap->tp, sizeof (atv)); } if (error == 0 && uap->tzp != NULL) { rtz.tz_minuteswest = tz_minuteswest; rtz.tz_dsttime = tz_dsttime; error = copyout(&rtz, uap->tzp, sizeof (rtz)); } return (error); } #ifndef _SYS_SYSPROTO_H_ struct settimeofday_args { struct timeval *tv; struct timezone *tzp; }; #endif /* ARGSUSED */ int sys_settimeofday(struct thread *td, struct settimeofday_args *uap) { struct timeval atv, *tvp; struct timezone atz, *tzp; int error; if (uap->tv) { error = copyin(uap->tv, &atv, sizeof(atv)); if (error) return (error); tvp = &atv; } else tvp = NULL; if (uap->tzp) { error = copyin(uap->tzp, &atz, sizeof(atz)); if (error) return (error); tzp = &atz; } else tzp = NULL; return (kern_settimeofday(td, tvp, tzp)); } int kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp) { int error; error = priv_check(td, PRIV_SETTIMEOFDAY); if (error) return (error); /* Verify all parameters before changing time. */ if (tv) { if (tv->tv_usec < 0 || tv->tv_usec >= 1000000 || tv->tv_sec < 0) return (EINVAL); error = settime(td, tv); } if (tzp && error == 0) { tz_minuteswest = tzp->tz_minuteswest; tz_dsttime = tzp->tz_dsttime; } return (error); } /* * Get value of an interval timer. The process virtual and profiling virtual * time timers are kept in the p_stats area, since they can be swapped out. 
* These are kept internally in the way they are specified externally: in * time until they expire. * * The real time interval timer is kept in the process table slot for the * process, and its value (it_value) is kept as an absolute time rather than * as a delta, so that it is easy to keep periodic real-time signals from * drifting. * * Virtual time timers are processed in the hardclock() routine of * kern_clock.c. The real time timer is processed by a timeout routine, * called from the softclock() routine. Since a callout may be delayed in * real time due to interrupt processing in the system, it is possible for * the real time timeout routine (realitexpire, given below), to be delayed * in real time past when it is supposed to occur. It does not suffice, * therefore, to reload the real timer .it_value from the real time timers * .it_interval. Rather, we compute the next time in absolute time the timer * should go off. */ #ifndef _SYS_SYSPROTO_H_ struct getitimer_args { u_int which; struct itimerval *itv; }; #endif int sys_getitimer(struct thread *td, struct getitimer_args *uap) { struct itimerval aitv; int error; error = kern_getitimer(td, uap->which, &aitv); if (error != 0) return (error); return (copyout(&aitv, uap->itv, sizeof (struct itimerval))); } int kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv) { struct proc *p = td->td_proc; struct timeval ctv; if (which > ITIMER_PROF) return (EINVAL); if (which == ITIMER_REAL) { /* * Convert from absolute to relative time in .it_value * part of real time timer. If time for real time timer * has passed return 0, else return difference between * current time and time for the timer to go off. */ PROC_LOCK(p); *aitv = p->p_realtimer; PROC_UNLOCK(p); if (timevalisset(&aitv->it_value)) { microuptime(&ctv); if (timevalcmp(&aitv->it_value, &ctv, <)) timevalclear(&aitv->it_value); else timevalsub(&aitv->it_value, &ctv); } } else { PROC_ITIMLOCK(p); *aitv = p->p_stats->p_timer[which]; PROC_ITIMUNLOCK(p); } #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT)) ktritimerval(aitv); #endif return (0); } #ifndef _SYS_SYSPROTO_H_ struct setitimer_args { u_int which; struct itimerval *itv, *oitv; }; #endif int sys_setitimer(struct thread *td, struct setitimer_args *uap) { struct itimerval aitv, oitv; int error; if (uap->itv == NULL) { uap->itv = uap->oitv; return (sys_getitimer(td, (struct getitimer_args *)uap)); } if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval)))) return (error); error = kern_setitimer(td, uap->which, &aitv, &oitv); if (error != 0 || uap->oitv == NULL) return (error); return (copyout(&oitv, uap->oitv, sizeof(struct itimerval))); } int kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv, struct itimerval *oitv) { struct proc *p = td->td_proc; struct timeval ctv; sbintime_t sbt, pr; if (aitv == NULL) return (kern_getitimer(td, which, oitv)); if (which > ITIMER_PROF) return (EINVAL); #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT)) ktritimerval(aitv); #endif if (itimerfix(&aitv->it_value) || aitv->it_value.tv_sec > INT32_MAX / 2) return (EINVAL); if (!timevalisset(&aitv->it_value)) timevalclear(&aitv->it_interval); else if (itimerfix(&aitv->it_interval) || aitv->it_interval.tv_sec > INT32_MAX / 2) return (EINVAL); if (which == ITIMER_REAL) { PROC_LOCK(p); if (timevalisset(&p->p_realtimer.it_value)) callout_stop(&p->p_itcallout); microuptime(&ctv); if (timevalisset(&aitv->it_value)) { pr = tvtosbt(aitv->it_value) >> tc_precexp; timevaladd(&aitv->it_value, &ctv); sbt = tvtosbt(aitv->it_value); 
callout_reset_sbt(&p->p_itcallout, sbt, pr, realitexpire, p, C_ABSOLUTE); } *oitv = p->p_realtimer; p->p_realtimer = *aitv; PROC_UNLOCK(p); if (timevalisset(&oitv->it_value)) { if (timevalcmp(&oitv->it_value, &ctv, <)) timevalclear(&oitv->it_value); else timevalsub(&oitv->it_value, &ctv); } } else { if (aitv->it_interval.tv_sec == 0 && aitv->it_interval.tv_usec != 0 && aitv->it_interval.tv_usec < tick) aitv->it_interval.tv_usec = tick; if (aitv->it_value.tv_sec == 0 && aitv->it_value.tv_usec != 0 && aitv->it_value.tv_usec < tick) aitv->it_value.tv_usec = tick; PROC_ITIMLOCK(p); *oitv = p->p_stats->p_timer[which]; p->p_stats->p_timer[which] = *aitv; PROC_ITIMUNLOCK(p); } #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT)) ktritimerval(oitv); #endif return (0); } /* * Real interval timer expired: * send process whose timer expired an alarm signal. * If time is not set up to reload, then just return. * Else compute next time timer should go off which is > current time. * This is where delay in processing this timeout causes multiple * SIGALRM calls to be compressed into one. * tvtohz() always adds 1 to allow for the time until the next clock * interrupt being strictly less than 1 clock tick, but we don't want * that here since we want to appear to be in sync with the clock * interrupt even when we're delayed. */ void realitexpire(void *arg) { struct proc *p; struct timeval ctv; sbintime_t isbt; p = (struct proc *)arg; kern_psignal(p, SIGALRM); if (!timevalisset(&p->p_realtimer.it_interval)) { timevalclear(&p->p_realtimer.it_value); if (p->p_flag & P_WEXIT) wakeup(&p->p_itcallout); return; } isbt = tvtosbt(p->p_realtimer.it_interval); if (isbt >= sbt_timethreshold) getmicrouptime(&ctv); else microuptime(&ctv); do { timevaladd(&p->p_realtimer.it_value, &p->p_realtimer.it_interval); } while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=)); callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value), isbt >> tc_precexp, realitexpire, p, C_ABSOLUTE); } /* * Check that a proposed value to load into the .it_value or * .it_interval part of an interval timer is acceptable, and * fix it to have at least minimal value (i.e. if it is less * than the resolution of the clock, round it up.) */ int itimerfix(struct timeval *tv) { if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) return (EINVAL); if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < (u_int)tick / 16) tv->tv_usec = (u_int)tick / 16; return (0); } /* * Decrement an interval timer by a specified number * of microseconds, which must be less than a second, * i.e. < 1000000. If the timer expires, then reload * it. In this case, carry over (usec - old value) to * reduce the value reloaded into the timer so that * the timer does not drift. This routine assumes * that it is called in a context where the timers * on which it is operating cannot change in value. 
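 *
 * Editorial worked example (values in microseconds): with
 * it_value = {0, 300}, it_interval = {0, 1000} and usec = 500, the timer
 * expires 200us "late"; the reload below carries that over and sets
 * it_value to {0, 800} so subsequent expirations do not drift.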
*/ int itimerdecr(struct itimerval *itp, int usec) { if (itp->it_value.tv_usec < usec) { if (itp->it_value.tv_sec == 0) { /* expired, and already in next interval */ usec -= itp->it_value.tv_usec; goto expire; } itp->it_value.tv_usec += 1000000; itp->it_value.tv_sec--; } itp->it_value.tv_usec -= usec; usec = 0; if (timevalisset(&itp->it_value)) return (1); /* expired, exactly at end of interval */ expire: if (timevalisset(&itp->it_interval)) { itp->it_value = itp->it_interval; itp->it_value.tv_usec -= usec; if (itp->it_value.tv_usec < 0) { itp->it_value.tv_usec += 1000000; itp->it_value.tv_sec--; } } else itp->it_value.tv_usec = 0; /* sec is already 0 */ return (0); } /* * Add and subtract routines for timevals. * N.B.: subtract routine doesn't deal with * results which are before the beginning, * it just gets very confused in this case. * Caveat emptor. */ void timevaladd(struct timeval *t1, const struct timeval *t2) { t1->tv_sec += t2->tv_sec; t1->tv_usec += t2->tv_usec; timevalfix(t1); } void timevalsub(struct timeval *t1, const struct timeval *t2) { t1->tv_sec -= t2->tv_sec; t1->tv_usec -= t2->tv_usec; timevalfix(t1); } static void timevalfix(struct timeval *t1) { if (t1->tv_usec < 0) { t1->tv_sec--; t1->tv_usec += 1000000; } if (t1->tv_usec >= 1000000) { t1->tv_sec++; t1->tv_usec -= 1000000; } } /* * ratecheck(): simple time-based rate-limit checking. */ int ratecheck(struct timeval *lasttime, const struct timeval *mininterval) { struct timeval tv, delta; int rv = 0; getmicrouptime(&tv); /* NB: 10ms precision */ delta = tv; timevalsub(&delta, lasttime); /* * check for 0,0 is so that the message will be seen at least once, * even if interval is huge. */ if (timevalcmp(&delta, mininterval, >=) || (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) { *lasttime = tv; rv = 1; } return (rv); } /* * ppsratecheck(): packets (or events) per second limitation. * * Return 0 if the limit is to be enforced (e.g. the caller * should drop a packet because of the rate limitation). * * maxpps of 0 always causes zero to be returned. maxpps of -1 * always causes 1 to be returned; this effectively defeats rate * limiting. * * Note that we maintain the struct timeval for compatibility * with other bsd systems. We reuse the storage and just monitor * clock ticks for minimal overhead. */ int ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) { int now; /* * Reset the last time and counter if this is the first call * or more than a second has passed since the last update of * lasttime. 
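 *
 * Editorial example: with maxpps == 100, the first 100 calls in a given
 * one-second window return 1 and every later call in that window returns
 * 0, until the window (lasttime->tv_sec, reused to hold a ticks value)
 * rolls over and the counter restarts.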
*/ now = ticks; if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) { lasttime->tv_sec = now; *curpps = 1; return (maxpps != 0); } else { (*curpps)++; /* NB: ignore potential overflow */ return (maxpps < 0 || *curpps <= maxpps); } } static void itimer_start(void) { struct kclock rt_clock = { .timer_create = realtimer_create, .timer_delete = realtimer_delete, .timer_settime = realtimer_settime, .timer_gettime = realtimer_gettime, .event_hook = NULL }; itimer_zone = uma_zcreate("itimer", sizeof(struct itimer), NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0); register_posix_clock(CLOCK_REALTIME, &rt_clock); register_posix_clock(CLOCK_MONOTONIC, &rt_clock); p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L); p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX); p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX); EVENTHANDLER_REGISTER(process_exit, itimers_event_hook_exit, (void *)ITIMER_EV_EXIT, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER(process_exec, itimers_event_hook_exec, (void *)ITIMER_EV_EXEC, EVENTHANDLER_PRI_ANY); } int register_posix_clock(int clockid, struct kclock *clk) { if ((unsigned)clockid >= MAX_CLOCKS) { printf("%s: invalid clockid\n", __func__); return (0); } posix_clocks[clockid] = *clk; return (1); } static int itimer_init(void *mem, int size, int flags) { struct itimer *it; it = (struct itimer *)mem; mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF); return (0); } static void itimer_fini(void *mem, int size) { struct itimer *it; it = (struct itimer *)mem; mtx_destroy(&it->it_mtx); } static void itimer_enter(struct itimer *it) { mtx_assert(&it->it_mtx, MA_OWNED); it->it_usecount++; } static void itimer_leave(struct itimer *it) { mtx_assert(&it->it_mtx, MA_OWNED); KASSERT(it->it_usecount > 0, ("invalid it_usecount")); if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0) wakeup(it); } #ifndef _SYS_SYSPROTO_H_ struct ktimer_create_args { clockid_t clock_id; struct sigevent * evp; int * timerid; }; #endif int sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap) { struct sigevent *evp, ev; int id; int error; if (uap->evp == NULL) { evp = NULL; } else { error = copyin(uap->evp, &ev, sizeof(ev)); if (error != 0) return (error); evp = &ev; } error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1); if (error == 0) { error = copyout(&id, uap->timerid, sizeof(int)); if (error != 0) kern_ktimer_delete(td, id); } return (error); } int kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp, int *timerid, int preset_id) { struct proc *p = td->td_proc; struct itimer *it; int id; int error; if (clock_id < 0 || clock_id >= MAX_CLOCKS) return (EINVAL); if (posix_clocks[clock_id].timer_create == NULL) return (EINVAL); if (evp != NULL) { if (evp->sigev_notify != SIGEV_NONE && evp->sigev_notify != SIGEV_SIGNAL && evp->sigev_notify != SIGEV_THREAD_ID) return (EINVAL); if ((evp->sigev_notify == SIGEV_SIGNAL || evp->sigev_notify == SIGEV_THREAD_ID) && !_SIG_VALID(evp->sigev_signo)) return (EINVAL); } if (p->p_itimers == NULL) itimers_alloc(p); it = uma_zalloc(itimer_zone, M_WAITOK); it->it_flags = 0; it->it_usecount = 0; it->it_active = 0; timespecclear(&it->it_time.it_value); timespecclear(&it->it_time.it_interval); it->it_overrun = 0; it->it_overrun_last = 0; it->it_clockid = clock_id; it->it_timerid = -1; it->it_proc = p; ksiginfo_init(&it->it_ksi); it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT; error = CLOCK_CALL(clock_id, timer_create, (it)); if (error != 0) goto out; PROC_LOCK(p); if (preset_id != -1) { KASSERT(preset_id >= 0 && 
preset_id < 3, ("invalid preset_id")); id = preset_id; if (p->p_itimers->its_timers[id] != NULL) { PROC_UNLOCK(p); error = 0; goto out; } } else { /* * Find a free timer slot, skipping those reserved * for setitimer(). */ for (id = 3; id < TIMER_MAX; id++) if (p->p_itimers->its_timers[id] == NULL) break; if (id == TIMER_MAX) { PROC_UNLOCK(p); error = EAGAIN; goto out; } } it->it_timerid = id; p->p_itimers->its_timers[id] = it; if (evp != NULL) it->it_sigev = *evp; else { it->it_sigev.sigev_notify = SIGEV_SIGNAL; switch (clock_id) { default: case CLOCK_REALTIME: it->it_sigev.sigev_signo = SIGALRM; break; case CLOCK_VIRTUAL: it->it_sigev.sigev_signo = SIGVTALRM; break; case CLOCK_PROF: it->it_sigev.sigev_signo = SIGPROF; break; } it->it_sigev.sigev_value.sival_int = id; } if (it->it_sigev.sigev_notify == SIGEV_SIGNAL || it->it_sigev.sigev_notify == SIGEV_THREAD_ID) { it->it_ksi.ksi_signo = it->it_sigev.sigev_signo; it->it_ksi.ksi_code = SI_TIMER; it->it_ksi.ksi_value = it->it_sigev.sigev_value; it->it_ksi.ksi_timerid = id; } PROC_UNLOCK(p); *timerid = id; return (0); out: ITIMER_LOCK(it); CLOCK_CALL(it->it_clockid, timer_delete, (it)); ITIMER_UNLOCK(it); uma_zfree(itimer_zone, it); return (error); } #ifndef _SYS_SYSPROTO_H_ struct ktimer_delete_args { int timerid; }; #endif int sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap) { return (kern_ktimer_delete(td, uap->timerid)); } static struct itimer * itimer_find(struct proc *p, int timerid) { struct itimer *it; PROC_LOCK_ASSERT(p, MA_OWNED); if ((p->p_itimers == NULL) || (timerid < 0) || (timerid >= TIMER_MAX) || (it = p->p_itimers->its_timers[timerid]) == NULL) { return (NULL); } ITIMER_LOCK(it); if ((it->it_flags & ITF_DELETING) != 0) { ITIMER_UNLOCK(it); it = NULL; } return (it); } int kern_ktimer_delete(struct thread *td, int timerid) { struct proc *p = td->td_proc; struct itimer *it; PROC_LOCK(p); it = itimer_find(p, timerid); if (it == NULL) { PROC_UNLOCK(p); return (EINVAL); } PROC_UNLOCK(p); it->it_flags |= ITF_DELETING; while (it->it_usecount > 0) { it->it_flags |= ITF_WANTED; msleep(it, &it->it_mtx, PPAUSE, "itimer", 0); } it->it_flags &= ~ITF_WANTED; CLOCK_CALL(it->it_clockid, timer_delete, (it)); ITIMER_UNLOCK(it); PROC_LOCK(p); if (KSI_ONQ(&it->it_ksi)) sigqueue_take(&it->it_ksi); p->p_itimers->its_timers[timerid] = NULL; PROC_UNLOCK(p); uma_zfree(itimer_zone, it); return (0); } #ifndef _SYS_SYSPROTO_H_ struct ktimer_settime_args { int timerid; int flags; const struct itimerspec * value; struct itimerspec * ovalue; }; #endif int sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap) { struct itimerspec val, oval, *ovalp; int error; error = copyin(uap->value, &val, sizeof(val)); if (error != 0) return (error); ovalp = uap->ovalue != NULL ? 
&oval : NULL; error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp); if (error == 0 && uap->ovalue != NULL) error = copyout(ovalp, uap->ovalue, sizeof(*ovalp)); return (error); } int kern_ktimer_settime(struct thread *td, int timer_id, int flags, struct itimerspec *val, struct itimerspec *oval) { struct proc *p; struct itimer *it; int error; p = td->td_proc; PROC_LOCK(p); if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) { PROC_UNLOCK(p); error = EINVAL; } else { PROC_UNLOCK(p); itimer_enter(it); error = CLOCK_CALL(it->it_clockid, timer_settime, (it, flags, val, oval)); itimer_leave(it); ITIMER_UNLOCK(it); } return (error); } #ifndef _SYS_SYSPROTO_H_ struct ktimer_gettime_args { int timerid; struct itimerspec * value; }; #endif int sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap) { struct itimerspec val; int error; error = kern_ktimer_gettime(td, uap->timerid, &val); if (error == 0) error = copyout(&val, uap->value, sizeof(val)); return (error); } int kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val) { struct proc *p; struct itimer *it; int error; p = td->td_proc; PROC_LOCK(p); if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) { PROC_UNLOCK(p); error = EINVAL; } else { PROC_UNLOCK(p); itimer_enter(it); error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, val)); itimer_leave(it); ITIMER_UNLOCK(it); } return (error); } #ifndef _SYS_SYSPROTO_H_ struct timer_getoverrun_args { int timerid; }; #endif int sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap) { return (kern_ktimer_getoverrun(td, uap->timerid)); } int kern_ktimer_getoverrun(struct thread *td, int timer_id) { struct proc *p = td->td_proc; struct itimer *it; int error ; PROC_LOCK(p); if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) { PROC_UNLOCK(p); error = EINVAL; } else { td->td_retval[0] = it->it_overrun_last; ITIMER_UNLOCK(it); PROC_UNLOCK(p); error = 0; } return (error); } static int realtimer_create(struct itimer *it) { callout_init_mtx(&it->it_callout, &it->it_mtx, 0); return (0); } static int realtimer_delete(struct itimer *it) { mtx_assert(&it->it_mtx, MA_OWNED); /* * clear timer's value and interval to tell realtimer_expire * to not rearm the timer. 
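 * (Editorial note: the itimer mutex is dropped around callout_drain()
 * below because realtimer_expire() runs with that same mutex held, per
 * the callout_init_mtx() in realtimer_create(); clearing the value and
 * interval first ensures the handler cannot rearm the callout in the
 * unlocked window.)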
*/ timespecclear(&it->it_time.it_value); timespecclear(&it->it_time.it_interval); ITIMER_UNLOCK(it); callout_drain(&it->it_callout); ITIMER_LOCK(it); return (0); } static int realtimer_gettime(struct itimer *it, struct itimerspec *ovalue) { struct timespec cts; mtx_assert(&it->it_mtx, MA_OWNED); realtimer_clocktime(it->it_clockid, &cts); *ovalue = it->it_time; if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) { timespecsub(&ovalue->it_value, &cts); if (ovalue->it_value.tv_sec < 0 || (ovalue->it_value.tv_sec == 0 && ovalue->it_value.tv_nsec == 0)) { ovalue->it_value.tv_sec = 0; ovalue->it_value.tv_nsec = 1; } } return (0); } static int realtimer_settime(struct itimer *it, int flags, struct itimerspec *value, struct itimerspec *ovalue) { struct timespec cts, ts; struct timeval tv; struct itimerspec val; mtx_assert(&it->it_mtx, MA_OWNED); val = *value; if (itimespecfix(&val.it_value)) return (EINVAL); if (timespecisset(&val.it_value)) { if (itimespecfix(&val.it_interval)) return (EINVAL); } else { timespecclear(&val.it_interval); } if (ovalue != NULL) realtimer_gettime(it, ovalue); it->it_time = val; if (timespecisset(&val.it_value)) { realtimer_clocktime(it->it_clockid, &cts); ts = val.it_value; if ((flags & TIMER_ABSTIME) == 0) { /* Convert to absolute time. */ timespecadd(&it->it_time.it_value, &cts); } else { timespecsub(&ts, &cts); /* * We don't care if ts is negative, tztohz will * fix it. */ } TIMESPEC_TO_TIMEVAL(&tv, &ts); callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire, it); } else { callout_stop(&it->it_callout); } return (0); } static void realtimer_clocktime(clockid_t id, struct timespec *ts) { if (id == CLOCK_REALTIME) getnanotime(ts); else /* CLOCK_MONOTONIC */ getnanouptime(ts); } int itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi) { struct itimer *it; PROC_LOCK_ASSERT(p, MA_OWNED); it = itimer_find(p, timerid); if (it != NULL) { ksi->ksi_overrun = it->it_overrun; it->it_overrun_last = it->it_overrun; it->it_overrun = 0; ITIMER_UNLOCK(it); return (0); } return (EINVAL); } int itimespecfix(struct timespec *ts) { if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000) return (EINVAL); if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000) ts->tv_nsec = tick * 1000; return (0); } /* Timeout callback for realtime timer */ static void realtimer_expire(void *arg) { struct timespec cts, ts; struct timeval tv; struct itimer *it; it = (struct itimer *)arg; realtimer_clocktime(it->it_clockid, &cts); /* Only fire if time is reached. */ if (timespeccmp(&cts, &it->it_time.it_value, >=)) { if (timespecisset(&it->it_time.it_interval)) { timespecadd(&it->it_time.it_value, &it->it_time.it_interval); while (timespeccmp(&cts, &it->it_time.it_value, >=)) { if (it->it_overrun < INT_MAX) it->it_overrun++; else it->it_ksi.ksi_errno = ERANGE; timespecadd(&it->it_time.it_value, &it->it_time.it_interval); } } else { /* single shot timer ? 
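 * (editorial note: no interval is configured, so this was a one-shot
 * timer; it_value is cleared below and the callout is not rearmed)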
*/ timespecclear(&it->it_time.it_value); } if (timespecisset(&it->it_time.it_value)) { ts = it->it_time.it_value; timespecsub(&ts, &cts); TIMESPEC_TO_TIMEVAL(&tv, &ts); callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire, it); } itimer_enter(it); ITIMER_UNLOCK(it); itimer_fire(it); ITIMER_LOCK(it); itimer_leave(it); } else if (timespecisset(&it->it_time.it_value)) { ts = it->it_time.it_value; timespecsub(&ts, &cts); TIMESPEC_TO_TIMEVAL(&tv, &ts); callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire, it); } } void itimer_fire(struct itimer *it) { struct proc *p = it->it_proc; struct thread *td; if (it->it_sigev.sigev_notify == SIGEV_SIGNAL || it->it_sigev.sigev_notify == SIGEV_THREAD_ID) { if (sigev_findtd(p, &it->it_sigev, &td) != 0) { ITIMER_LOCK(it); timespecclear(&it->it_time.it_value); timespecclear(&it->it_time.it_interval); callout_stop(&it->it_callout); ITIMER_UNLOCK(it); return; } if (!KSI_ONQ(&it->it_ksi)) { it->it_ksi.ksi_errno = 0; ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev); tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi); } else { if (it->it_overrun < INT_MAX) it->it_overrun++; else it->it_ksi.ksi_errno = ERANGE; } PROC_UNLOCK(p); } } static void itimers_alloc(struct proc *p) { struct itimers *its; int i; its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO); LIST_INIT(&its->its_virtual); LIST_INIT(&its->its_prof); TAILQ_INIT(&its->its_worklist); for (i = 0; i < TIMER_MAX; i++) its->its_timers[i] = NULL; PROC_LOCK(p); if (p->p_itimers == NULL) { p->p_itimers = its; PROC_UNLOCK(p); } else { PROC_UNLOCK(p); free(its, M_SUBPROC); } } static void itimers_event_hook_exec(void *arg, struct proc *p, struct image_params *imgp __unused) { itimers_event_hook_exit(arg, p); } /* Clean up timers when some process events are being triggered. */ static void itimers_event_hook_exit(void *arg, struct proc *p) { struct itimers *its; struct itimer *it; int event = (int)(intptr_t)arg; int i; if (p->p_itimers != NULL) { its = p->p_itimers; for (i = 0; i < MAX_CLOCKS; ++i) { if (posix_clocks[i].event_hook != NULL) CLOCK_CALL(i, event_hook, (p, i, event)); } /* * According to susv3, XSI interval timers should be inherited * by new image. */ if (event == ITIMER_EV_EXEC) i = 3; else if (event == ITIMER_EV_EXIT) i = 0; else panic("unhandled event"); for (; i < TIMER_MAX; ++i) { if ((it = its->its_timers[i]) != NULL) kern_ktimer_delete(curthread, i); } if (its->its_timers[0] == NULL && its->its_timers[1] == NULL && its->its_timers[2] == NULL) { free(its, M_SUBPROC); p->p_itimers = NULL; } } } Index: projects/clang400-import/sys/kern/subr_clock.c =================================================================== --- projects/clang400-import/sys/kern/subr_clock.c (revision 312719) +++ projects/clang400-import/sys/kern/subr_clock.c (revision 312720) @@ -1,224 +1,238 @@ /*- * Copyright (c) 1988 University of Utah. * Copyright (c) 1982, 1990, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Utah $Hdr: clock.c 1.18 91/01/21$ * from: @(#)clock.c 8.2 (Berkeley) 1/12/94 * from: NetBSD: clock_subr.c,v 1.6 2001/07/07 17:04:02 thorpej Exp * and * from: src/sys/i386/isa/clock.c,v 1.176 2001/09/04 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include int tz_minuteswest; int tz_dsttime; /* * The adjkerntz and wall_cmos_clock sysctls are in the "machdep" sysctl * namespace because they were misplaced there originally. */ static int adjkerntz; static int sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) { int error; error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); if (!error && req->newptr) resettodr(); return (error); } SYSCTL_PROC(_machdep, OID_AUTO, adjkerntz, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "Local offset from UTC in seconds"); static int ct_debug; SYSCTL_INT(_debug, OID_AUTO, clocktime, CTLFLAG_RW, &ct_debug, 0, "Enable printing of clocktime debugging"); static int wall_cmos_clock; SYSCTL_INT(_machdep, OID_AUTO, wall_cmos_clock, CTLFLAG_RW, &wall_cmos_clock, 0, "Enables application of machdep.adjkerntz"); /*--------------------------------------------------------------------* * Generic routines to convert between a POSIX date * (seconds since 1/1/1970) and yr/mo/day/hr/min/sec * Derived from NetBSD arch/hp300/hp300/clock.c */ #define FEBRUARY 2 #define days_in_year(y) (leapyear(y) ? 366 : 365) #define days_in_month(y, m) \ (month_days[(m) - 1] + (m == FEBRUARY ? leapyear(y) : 0)) /* Day of week. Days are counted from 1/1/1970, which was a Thursday */ #define day_of_week(days) (((days) + 4) % 7) static const int month_days[12] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; /* * This inline avoids some unnecessary modulo operations * as compared with the usual macro: * ( ((year % 4) == 0 && * (year % 100) != 0) || * ((year % 400) == 0) ) * It is otherwise equivalent. 
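 *
 * Editorial worked example: 1900 hits the (year % 100) == 0 branch and is
 * not a leap year, while 2000 also satisfies (year % 400) == 0 and
 * therefore is.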
*/ static int leapyear(int year) { int rv = 0; if ((year & 3) == 0) { rv = 1; if ((year % 100) == 0) { rv = 0; if ((year % 400) == 0) rv = 1; } } return (rv); } static void print_ct(struct clocktime *ct) { printf("[%04d-%02d-%02d %02d:%02d:%02d]", ct->year, ct->mon, ct->day, ct->hour, ct->min, ct->sec); } int clock_ct_to_ts(struct clocktime *ct, struct timespec *ts) { int i, year, days; year = ct->year; if (ct_debug) { printf("ct_to_ts("); print_ct(ct); printf(")"); } /* Sanity checks. */ if (ct->mon < 1 || ct->mon > 12 || ct->day < 1 || ct->day > days_in_month(year, ct->mon) || ct->hour > 23 || ct->min > 59 || ct->sec > 59 || (sizeof(time_t) == 4 && year > 2037)) { /* time_t overflow */ if (ct_debug) printf(" = EINVAL\n"); return (EINVAL); } /* * Compute days since start of time * First from years, then from months. */ days = 0; for (i = POSIX_BASE_YEAR; i < year; i++) days += days_in_year(i); /* Months */ for (i = 1; i < ct->mon; i++) days += days_in_month(year, i); days += (ct->day - 1); ts->tv_sec = (((time_t)days * 24 + ct->hour) * 60 + ct->min) * 60 + ct->sec; ts->tv_nsec = ct->nsec; if (ct_debug) printf(" = %ld.%09ld\n", (long)ts->tv_sec, (long)ts->tv_nsec); return (0); } void clock_ts_to_ct(struct timespec *ts, struct clocktime *ct) { - int i, year, days; + time_t i, year, days; time_t rsec; /* remainder seconds */ time_t secs; secs = ts->tv_sec; days = secs / SECDAY; rsec = secs % SECDAY; ct->dow = day_of_week(days); /* Subtract out whole years, counting them in i. */ for (year = POSIX_BASE_YEAR; days >= days_in_year(year); year++) days -= days_in_year(year); ct->year = year; /* Subtract out whole months, counting them in i. */ for (i = 1; days >= days_in_month(year, i); i++) days -= days_in_month(year, i); ct->mon = i; /* Days are what is left over (+1) from all that. */ ct->day = days + 1; /* Hours, minutes, seconds are easy */ ct->hour = rsec / 3600; rsec = rsec % 3600; ct->min = rsec / 60; rsec = rsec % 60; ct->sec = rsec; ct->nsec = ts->tv_nsec; if (ct_debug) { printf("ts_to_ct(%ld.%09ld) = ", (long)ts->tv_sec, (long)ts->tv_nsec); print_ct(ct); printf("\n"); } + + KASSERT(ct->year >= 0 && ct->year < 10000, + ("year %d isn't a 4 digit year", ct->year)); + KASSERT(ct->mon >= 1 && ct->mon <= 12, + ("month %d not in 1-12", ct->mon)); + KASSERT(ct->day >= 1 && ct->day <= 31, + ("day %d not in 1-31", ct->day)); + KASSERT(ct->hour >= 0 && ct->hour <= 23, + ("hour %d not in 0-23", ct->hour)); + KASSERT(ct->min >= 0 && ct->min <= 59, + ("minute %d not in 0-59", ct->min)); + /* Not sure if this interface needs to handle leapseconds or not. */ + KASSERT(ct->sec >= 0 && ct->sec <= 60, + ("seconds %d not in 0-60", ct->sec)); } int utc_offset(void) { return (tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0)); } Index: projects/clang400-import/sys/kern/subr_gtaskqueue.c =================================================================== --- projects/clang400-import/sys/kern/subr_gtaskqueue.c (revision 312719) +++ projects/clang400-import/sys/kern/subr_gtaskqueue.c (revision 312720) @@ -1,937 +1,961 @@ /*- * Copyright (c) 2000 Doug Rabson * Copyright (c) 2014 Jeff Roberson * Copyright (c) 2016 Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_GTASKQUEUE, "taskqueue", "Task Queues"); static void gtaskqueue_thread_enqueue(void *); static void gtaskqueue_thread_loop(void *arg); struct gtaskqueue_busy { struct gtask *tb_running; TAILQ_ENTRY(gtaskqueue_busy) tb_link; }; static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1; struct gtaskqueue { STAILQ_HEAD(, gtask) tq_queue; gtaskqueue_enqueue_fn tq_enqueue; void *tq_context; char *tq_name; TAILQ_HEAD(, gtaskqueue_busy) tq_active; struct mtx tq_mutex; struct thread **tq_threads; int tq_tcount; int tq_spin; int tq_flags; int tq_callouts; taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS]; void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS]; }; #define TQ_FLAGS_ACTIVE (1 << 0) #define TQ_FLAGS_BLOCKED (1 << 1) #define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2) #define DT_CALLOUT_ARMED (1 << 0) #define TQ_LOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_lock_spin(&(tq)->tq_mutex); \ else \ mtx_lock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_LOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_OWNED) #define TQ_UNLOCK(tq) \ do { \ if ((tq)->tq_spin) \ mtx_unlock_spin(&(tq)->tq_mutex); \ else \ mtx_unlock(&(tq)->tq_mutex); \ } while (0) #define TQ_ASSERT_UNLOCKED(tq) mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED) #ifdef INVARIANTS static void gtask_dump(struct gtask *gtask) { printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n", gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func, gtask->ta_context); } #endif static __inline int TQ_SLEEP(struct gtaskqueue *tq, void *p, struct mtx *m, int pri, const char *wm, int t) { if (tq->tq_spin) return (msleep_spin(p, m, wm, t)); return (msleep(p, m, pri, wm, t)); } static struct gtaskqueue * _gtaskqueue_create(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context, int mtxflags, const char *mtxname __unused) { struct gtaskqueue *queue; char *tq_name; tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO); if (!tq_name) return (NULL); snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? 
name : "taskqueue"); queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO); if (!queue) return (NULL); STAILQ_INIT(&queue->tq_queue); TAILQ_INIT(&queue->tq_active); queue->tq_enqueue = enqueue; queue->tq_context = context; queue->tq_name = tq_name; queue->tq_spin = (mtxflags & MTX_SPIN) != 0; queue->tq_flags |= TQ_FLAGS_ACTIVE; if (enqueue == gtaskqueue_thread_enqueue) queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE; mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags); return (queue); } /* * Signal a taskqueue thread to terminate. */ static void gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq) { while (tq->tq_tcount > 0 || tq->tq_callouts > 0) { wakeup(tq); TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0); } } static void gtaskqueue_free(struct gtaskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_ACTIVE; gtaskqueue_terminate(queue->tq_threads, queue); KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?")); KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks")); mtx_destroy(&queue->tq_mutex); free(queue->tq_threads, M_GTASKQUEUE); free(queue->tq_name, M_GTASKQUEUE); free(queue, M_GTASKQUEUE); } int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask) { #ifdef INVARIANTS if (queue == NULL) { gtask_dump(gtask); panic("queue == NULL"); } #endif TQ_LOCK(queue); if (gtask->ta_flags & TASK_ENQUEUED) { TQ_UNLOCK(queue); return (0); } STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link); gtask->ta_flags |= TASK_ENQUEUED; TQ_UNLOCK(queue); if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) queue->tq_enqueue(queue->tq_context); return (0); } static void gtaskqueue_task_nop_fn(void *context) { } /* * Block until all currently queued tasks in this taskqueue * have begun execution. Tasks queued during execution of * this function are ignored. */ static void gtaskqueue_drain_tq_queue(struct gtaskqueue *queue) { struct gtask t_barrier; if (STAILQ_EMPTY(&queue->tq_queue)) return; /* * Enqueue our barrier after all current tasks, but with * the highest priority so that newly queued tasks cannot * pass it. Because of the high priority, we can not use * taskqueue_enqueue_locked directly (which drops the lock * anyway) so just insert it at tail while we have the * queue lock. */ GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier); STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link); t_barrier.ta_flags |= TASK_ENQUEUED; /* * Once the barrier has executed, all previously queued tasks * have completed or are currently executing. */ while (t_barrier.ta_flags & TASK_ENQUEUED) TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0); } /* * Block until all currently executing tasks for this taskqueue * complete. Tasks that begin execution during the execution * of this function are ignored. */ static void gtaskqueue_drain_tq_active(struct gtaskqueue *queue) { struct gtaskqueue_busy tb_marker, *tb_first; if (TAILQ_EMPTY(&queue->tq_active)) return; /* Block taskq_terminate().*/ queue->tq_callouts++; /* * Wait for all currently executing taskqueue threads * to go idle. */ tb_marker.tb_running = TB_DRAIN_WAITER; TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link); while (TAILQ_FIRST(&queue->tq_active) != &tb_marker) TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0); TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link); /* * Wakeup any other drain waiter that happened to queue up * without any intervening active thread. 
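 * For example, when two threads drain the same queue while a single task
 * is running, both markers queue up behind the running entry; the first
 * waiter to reach the head of tq_active is woken by
 * gtaskqueue_run_locked() and, on its way out, finds the second marker at
 * the head and wakes it here.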
*/ tb_first = TAILQ_FIRST(&queue->tq_active); if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER) wakeup(tb_first); /* Release taskqueue_terminate(). */ queue->tq_callouts--; if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0) wakeup_one(queue->tq_threads); } void gtaskqueue_block(struct gtaskqueue *queue) { TQ_LOCK(queue); queue->tq_flags |= TQ_FLAGS_BLOCKED; TQ_UNLOCK(queue); } void gtaskqueue_unblock(struct gtaskqueue *queue) { TQ_LOCK(queue); queue->tq_flags &= ~TQ_FLAGS_BLOCKED; if (!STAILQ_EMPTY(&queue->tq_queue)) queue->tq_enqueue(queue->tq_context); TQ_UNLOCK(queue); } static void gtaskqueue_run_locked(struct gtaskqueue *queue) { struct gtaskqueue_busy tb; struct gtaskqueue_busy *tb_first; struct gtask *gtask; KASSERT(queue != NULL, ("tq is NULL")); TQ_ASSERT_LOCKED(queue); tb.tb_running = NULL; while (STAILQ_FIRST(&queue->tq_queue)) { TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link); /* * Carefully remove the first task from the queue and * clear its TASK_ENQUEUED flag */ gtask = STAILQ_FIRST(&queue->tq_queue); KASSERT(gtask != NULL, ("task is NULL")); STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link); gtask->ta_flags &= ~TASK_ENQUEUED; tb.tb_running = gtask; TQ_UNLOCK(queue); KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL")); gtask->ta_func(gtask->ta_context); TQ_LOCK(queue); tb.tb_running = NULL; wakeup(gtask); TAILQ_REMOVE(&queue->tq_active, &tb, tb_link); tb_first = TAILQ_FIRST(&queue->tq_active); if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER) wakeup(tb_first); } } static int task_is_running(struct gtaskqueue *queue, struct gtask *gtask) { struct gtaskqueue_busy *tb; TQ_ASSERT_LOCKED(queue); TAILQ_FOREACH(tb, &queue->tq_active, tb_link) { if (tb->tb_running == gtask) return (1); } return (0); } static int gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask) { if (gtask->ta_flags & TASK_ENQUEUED) STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link); gtask->ta_flags &= ~TASK_ENQUEUED; return (task_is_running(queue, gtask) ? 
EBUSY : 0); } int gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask) { int error; TQ_LOCK(queue); error = gtaskqueue_cancel_locked(queue, gtask); TQ_UNLOCK(queue); return (error); } void gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask)) TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0); TQ_UNLOCK(queue); } void gtaskqueue_drain_all(struct gtaskqueue *queue) { if (!queue->tq_spin) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); TQ_LOCK(queue); gtaskqueue_drain_tq_queue(queue); gtaskqueue_drain_tq_active(queue); TQ_UNLOCK(queue); } static int _gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri, cpuset_t *mask, const char *name, va_list ap) { char ktname[MAXCOMLEN + 1]; struct thread *td; struct gtaskqueue *tq; int i, error; if (count <= 0) return (EINVAL); vsnprintf(ktname, sizeof(ktname), name, ap); tq = *tqp; tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE, M_NOWAIT | M_ZERO); if (tq->tq_threads == NULL) { printf("%s: no memory for %s threads\n", __func__, ktname); return (ENOMEM); } for (i = 0; i < count; i++) { if (count == 1) error = kthread_add(gtaskqueue_thread_loop, tqp, NULL, &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname); else error = kthread_add(gtaskqueue_thread_loop, tqp, NULL, &tq->tq_threads[i], RFSTOPPED, 0, "%s_%d", ktname, i); if (error) { /* should be ok to continue, taskqueue_free will dtrt */ printf("%s: kthread_add(%s): error %d", __func__, ktname, error); tq->tq_threads[i] = NULL; /* paranoid */ } else tq->tq_tcount++; } for (i = 0; i < count; i++) { if (tq->tq_threads[i] == NULL) continue; td = tq->tq_threads[i]; if (mask) { error = cpuset_setthread(td->td_tid, mask); /* * Failing to pin is rarely an actual fatal error; * it'll just affect performance. */ if (error) printf("%s: curthread=%llu: can't pin; " "error=%d\n", __func__, (unsigned long long) td->td_tid, error); } thread_lock(td); sched_prio(td, pri); sched_add(td, SRQ_BORING); thread_unlock(td); } return (0); } static int gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri, const char *name, ...) { va_list ap; int error; va_start(ap, name); error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap); va_end(ap); return (error); } static inline void gtaskqueue_run_callback(struct gtaskqueue *tq, enum taskqueue_callback_type cb_type) { taskqueue_callback_fn tq_callback; TQ_ASSERT_UNLOCKED(tq); tq_callback = tq->tq_callbacks[cb_type]; if (tq_callback != NULL) tq_callback(tq->tq_cb_contexts[cb_type]); } static void gtaskqueue_thread_loop(void *arg) { struct gtaskqueue **tqp, *tq; tqp = arg; tq = *tqp; gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT); TQ_LOCK(tq); while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) { /* XXX ? */ gtaskqueue_run_locked(tq); /* * Because taskqueue_run() can drop tq_mutex, we need to * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the * meantime, which means we missed a wakeup. */ if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0) break; TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0); } gtaskqueue_run_locked(tq); /* * This thread is on its way out, so just drop the lock temporarily * in order to call the shutdown callback. This allows the callback * to look at the taskqueue, even just before it dies. 
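 * For example, a queue whose INIT callback set up per-thread state can
 * release that state from its SHUTDOWN callback; both callbacks run in
 * the context of the taskqueue thread with tq_mutex dropped.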
*/ TQ_UNLOCK(tq); gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN); TQ_LOCK(tq); /* rendezvous with thread that asked us to terminate */ tq->tq_tcount--; wakeup_one(tq->tq_threads); TQ_UNLOCK(tq); kthread_exit(); } static void gtaskqueue_thread_enqueue(void *context) { struct gtaskqueue **tqp, *tq; tqp = context; tq = *tqp; wakeup_one(tq); } static struct gtaskqueue * gtaskqueue_create_fast(const char *name, int mflags, taskqueue_enqueue_fn enqueue, void *context) { return _gtaskqueue_create(name, mflags, enqueue, context, MTX_SPIN, "fast_taskqueue"); } struct taskqgroup_cpu { LIST_HEAD(, grouptask) tgc_tasks; struct gtaskqueue *tgc_taskq; int tgc_cnt; int tgc_cpu; }; struct taskqgroup { struct taskqgroup_cpu tqg_queue[MAXCPU]; struct mtx tqg_lock; char * tqg_name; int tqg_adjusting; int tqg_stride; int tqg_cnt; }; struct taskq_bind_task { struct gtask bt_task; int bt_cpuid; }; static void taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu) { struct taskqgroup_cpu *qcpu; qcpu = &qgroup->tqg_queue[idx]; LIST_INIT(&qcpu->tgc_tasks); qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK, taskqueue_thread_enqueue, &qcpu->tgc_taskq); gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT, "%s_%d", qgroup->tqg_name, idx); qcpu->tgc_cpu = cpu; } static void taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx) { gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq); } /* * Find the taskq with the least # of tasks that is not already servicing * this uniq identifier. */ static int taskqgroup_find(struct taskqgroup *qgroup, void *uniq) { struct grouptask *n; int i, idx, mincnt; int strict; mtx_assert(&qgroup->tqg_lock, MA_OWNED); if (qgroup->tqg_cnt == 0) return (0); idx = -1; mincnt = INT_MAX; /* * Two passes: first scan for a queue with the least tasks that * does not already service this uniq id. If that fails, simply pick * the queue with the least total tasks. */ for (strict = 1; mincnt == INT_MAX; strict = 0) { for (i = 0; i < qgroup->tqg_cnt; i++) { if (qgroup->tqg_queue[i].tgc_cnt > mincnt) continue; if (strict) { LIST_FOREACH(n, &qgroup->tqg_queue[i].tgc_tasks, gt_list) if (n->gt_uniq == uniq) break; if (n != NULL) continue; } mincnt = qgroup->tqg_queue[i].tgc_cnt; idx = i; } } if (idx == -1) panic("taskqgroup_find: Failed to pick a qid."); return (idx); } +/* + * smp_started is unusable since it is not set for UP kernels or even for + * SMP kernels when there is 1 CPU. This is usually handled by adding a + * (mp_ncpus == 1) test, but that would be broken here since we need to + * synchronize with the SI_SUB_SMP ordering. Even in the pure SMP case + * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP. + * + * So maintain our own flag. It must be set after all CPUs are started + * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed + * adjustment is properly delayed. SI_ORDER_FOURTH is clearly before + * SI_ORDER_ANY and unclearly after the CPUs are started. It would be + * simpler for adjustment to pass a flag indicating if it is delayed.
+ */ +static int tqg_smp_started; +static void +tqg_record_smp_started(void *arg) +{ + tqg_smp_started = 1; +} + +SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH, + tqg_record_smp_started, NULL); + void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask, void *uniq, int irq, char *name) { cpuset_t mask; int qid; gtask->gt_uniq = uniq; gtask->gt_name = name; gtask->gt_irq = irq; gtask->gt_cpu = -1; mtx_lock(&qgroup->tqg_lock); qid = taskqgroup_find(qgroup, uniq); qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; - if (irq != -1 && (smp_started || mp_ncpus == 1)) { + if (irq != -1 && tqg_smp_started ) { gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu; CPU_ZERO(&mask); CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask); mtx_unlock(&qgroup->tqg_lock); intr_setaffinity(irq, &mask); } else mtx_unlock(&qgroup->tqg_lock); } static void taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask) { cpuset_t mask; int qid, cpu; mtx_lock(&qgroup->tqg_lock); qid = taskqgroup_find(qgroup, gtask->gt_uniq); cpu = qgroup->tqg_queue[qid].tgc_cpu; if (gtask->gt_irq != -1) { mtx_unlock(&qgroup->tqg_lock); CPU_ZERO(&mask); CPU_SET(cpu, &mask); intr_setaffinity(gtask->gt_irq, &mask); mtx_lock(&qgroup->tqg_lock); } qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; mtx_unlock(&qgroup->tqg_lock); } int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask, void *uniq, int cpu, int irq, char *name) { cpuset_t mask; int i, qid; qid = -1; gtask->gt_uniq = uniq; gtask->gt_name = name; gtask->gt_irq = irq; gtask->gt_cpu = cpu; mtx_lock(&qgroup->tqg_lock); - if (smp_started || mp_ncpus == 1) { + if (tqg_smp_started) { for (i = 0; i < qgroup->tqg_cnt; i++) if (qgroup->tqg_queue[i].tgc_cpu == cpu) { qid = i; break; } if (qid == -1) { mtx_unlock(&qgroup->tqg_lock); return (EINVAL); } } else qid = 0; qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; cpu = qgroup->tqg_queue[qid].tgc_cpu; mtx_unlock(&qgroup->tqg_lock); CPU_ZERO(&mask); CPU_SET(cpu, &mask); if (irq != -1 && (smp_started || mp_ncpus == 1)) intr_setaffinity(irq, &mask); return (0); } static int taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask) { cpuset_t mask; int i, qid, irq, cpu; qid = -1; irq = gtask->gt_irq; cpu = gtask->gt_cpu; - MPASS(smp_started || mp_ncpus == 1); + MPASS(tqg_smp_started); mtx_lock(&qgroup->tqg_lock); for (i = 0; i < qgroup->tqg_cnt; i++) if (qgroup->tqg_queue[i].tgc_cpu == cpu) { qid = i; break; } if (qid == -1) { mtx_unlock(&qgroup->tqg_lock); return (EINVAL); } qgroup->tqg_queue[qid].tgc_cnt++; LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list); MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL); gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq; mtx_unlock(&qgroup->tqg_lock); CPU_ZERO(&mask); CPU_SET(cpu, &mask); if (irq != -1) intr_setaffinity(irq, &mask); return (0); } void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask) { int i; mtx_lock(&qgroup->tqg_lock); for (i = 0; i < qgroup->tqg_cnt; i++) if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue) break; if (i == qgroup->tqg_cnt) panic("taskqgroup_detach: task not in 
group\n"); qgroup->tqg_queue[i].tgc_cnt--; LIST_REMOVE(gtask, gt_list); mtx_unlock(&qgroup->tqg_lock); gtask->gt_taskqueue = NULL; } static void taskqgroup_binder(void *ctx) { struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx; cpuset_t mask; int error; CPU_ZERO(&mask); CPU_SET(gtask->bt_cpuid, &mask); error = cpuset_setthread(curthread->td_tid, &mask); thread_lock(curthread); sched_bind(curthread, gtask->bt_cpuid); thread_unlock(curthread); if (error) printf("taskqgroup_binder: setaffinity failed: %d\n", error); free(gtask, M_DEVBUF); } static void taskqgroup_bind(struct taskqgroup *qgroup) { struct taskq_bind_task *gtask; int i; /* * Bind taskqueue threads to specific CPUs, if they have been assigned * one. */ if (qgroup->tqg_cnt == 1) return; for (i = 0; i < qgroup->tqg_cnt; i++) { gtask = malloc(sizeof (*gtask), M_DEVBUF, M_WAITOK); GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask); gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu; grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq, &gtask->bt_task); } } static int _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride) { LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL); struct grouptask *gtask; int i, k, old_cnt, old_cpu, cpu; mtx_assert(&qgroup->tqg_lock, MA_OWNED); - if (cnt < 1 || cnt * stride > mp_ncpus || (!smp_started && (mp_ncpus != 1))) { - printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n", - cnt, stride, mp_ncpus, smp_started); + if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) { + printf("%s: failed cnt: %d stride: %d " "mp_ncpus: %d smp_started: %d\n", + __func__, cnt, stride, mp_ncpus, smp_started); return (EINVAL); } if (qgroup->tqg_adjusting) { printf("taskqgroup_adjust failed: adjusting\n"); return (EBUSY); } qgroup->tqg_adjusting = 1; old_cnt = qgroup->tqg_cnt; old_cpu = 0; if (old_cnt < cnt) old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu; mtx_unlock(&qgroup->tqg_lock); /* * Set up queue for tasks added before boot. */ if (old_cnt == 0) { LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks, grouptask, gt_list); qgroup->tqg_queue[0].tgc_cnt = 0; } /* * If new taskq threads have been added. */ cpu = old_cpu; for (i = old_cnt; i < cnt; i++) { taskqgroup_cpu_create(qgroup, i, cpu); for (k = 0; k < stride; k++) cpu = CPU_NEXT(cpu); } mtx_lock(&qgroup->tqg_lock); qgroup->tqg_cnt = cnt; qgroup->tqg_stride = stride; /* * Adjust drivers to use new taskqs. */ for (i = 0; i < old_cnt; i++) { while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) { LIST_REMOVE(gtask, gt_list); qgroup->tqg_queue[i].tgc_cnt--; LIST_INSERT_HEAD(&gtask_head, gtask, gt_list); } } mtx_unlock(&qgroup->tqg_lock); while ((gtask = LIST_FIRST(&gtask_head))) { LIST_REMOVE(gtask, gt_list); if (gtask->gt_cpu == -1) taskqgroup_attach_deferred(qgroup, gtask); else if (taskqgroup_attach_cpu_deferred(qgroup, gtask)) taskqgroup_attach_deferred(qgroup, gtask); } #ifdef INVARIANTS mtx_lock(&qgroup->tqg_lock); for (i = 0; i < qgroup->tqg_cnt; i++) { MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL); LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) MPASS(gtask->gt_taskqueue != NULL); } mtx_unlock(&qgroup->tqg_lock); #endif /* * If taskq thread count has been reduced.
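 * For example, shrinking a group from four queues to two frees the queues
 * at indices 2 and 3 below; their tasks were already pulled onto
 * gtask_head and re-attached across the surviving queues above, so the
 * freed queues are empty by the time taskqgroup_cpu_remove() runs.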
*/ for (i = cnt; i < old_cnt; i++) taskqgroup_cpu_remove(qgroup, i); taskqgroup_bind(qgroup); mtx_lock(&qgroup->tqg_lock); qgroup->tqg_adjusting = 0; return (0); } int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride) { int error; mtx_lock(&qgroup->tqg_lock); error = _taskqgroup_adjust(qgroup, cnt, stride); mtx_unlock(&qgroup->tqg_lock); return (error); } struct taskqgroup * taskqgroup_create(char *name) { struct taskqgroup *qgroup; qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO); mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF); qgroup->tqg_name = name; LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks); return (qgroup); } void taskqgroup_destroy(struct taskqgroup *qgroup) { } Index: projects/clang400-import/sys/kern/vfs_lookup.c =================================================================== --- projects/clang400-import/sys/kern/vfs_lookup.c (revision 312719) +++ projects/clang400-import/sys/kern/vfs_lookup.c (revision 312720) @@ -1,1434 +1,1435 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)vfs_lookup.c 8.4 (Berkeley) 2/16/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include "opt_ktrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #define NAMEI_DIAGNOSTIC 1 #undef NAMEI_DIAGNOSTIC SDT_PROVIDER_DECLARE(vfs); SDT_PROBE_DEFINE3(vfs, namei, lookup, entry, "struct vnode *", "char *", "unsigned long"); SDT_PROBE_DEFINE2(vfs, namei, lookup, return, "int", "struct vnode *"); /* Allocation zone for namei. */ uma_zone_t namei_zone; /* Placeholder vnode for mp traversal. 
*/ static struct vnode *vp_crossmp; static int crossmp_vop_islocked(struct vop_islocked_args *ap) { return (LK_SHARED); } static int crossmp_vop_lock1(struct vop_lock1_args *ap) { struct vnode *vp; struct lock *lk; const char *file; int flags, line; vp = ap->a_vp; lk = vp->v_vnlock; flags = ap->a_flags; file = ap->a_file; line = ap->a_line; if ((flags & LK_SHARED) == 0) panic("invalid lock request for crossmp"); WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, file, line, flags & LK_INTERLOCK ? &VI_MTX(vp)->lock_object : NULL); WITNESS_LOCK(&lk->lock_object, 0, file, line); if ((flags & LK_INTERLOCK) != 0) VI_UNLOCK(vp); LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, ap->a_file, line); return (0); } static int crossmp_vop_unlock(struct vop_unlock_args *ap) { struct vnode *vp; struct lock *lk; int flags; vp = ap->a_vp; lk = vp->v_vnlock; flags = ap->a_flags; if ((flags & LK_INTERLOCK) != 0) VI_UNLOCK(vp); WITNESS_UNLOCK(&lk->lock_object, 0, LOCK_FILE, LOCK_LINE); LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, LOCK_FILE, LOCK_LINE); return (0); } static struct vop_vector crossmp_vnodeops = { + .vop_default = &default_vnodeops, .vop_islocked = crossmp_vop_islocked, .vop_lock1 = crossmp_vop_lock1, .vop_unlock = crossmp_vop_unlock, }; struct nameicap_tracker { struct vnode *dp; TAILQ_ENTRY(nameicap_tracker) nm_link; }; /* Zone for cap mode tracker elements used for dotdot capability checks. */ static uma_zone_t nt_zone; static void nameiinit(void *dummy __unused) { namei_zone = uma_zcreate("NAMEI", MAXPATHLEN, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); nt_zone = uma_zcreate("rentr", sizeof(struct nameicap_tracker), NULL, NULL, NULL, NULL, sizeof(void *), 0); getnewvnode("crossmp", NULL, &crossmp_vnodeops, &vp_crossmp); } SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nameiinit, NULL); static int lookup_shared = 1; SYSCTL_INT(_vfs, OID_AUTO, lookup_shared, CTLFLAG_RWTUN, &lookup_shared, 0, "enables shared locks for path name translation"); static int lookup_cap_dotdot = 1; SYSCTL_INT(_vfs, OID_AUTO, lookup_cap_dotdot, CTLFLAG_RWTUN, &lookup_cap_dotdot, 0, "enables \"..\" components in path lookup in capability mode"); static int lookup_cap_dotdot_nonlocal = 1; SYSCTL_INT(_vfs, OID_AUTO, lookup_cap_dotdot_nonlocal, CTLFLAG_RWTUN, &lookup_cap_dotdot_nonlocal, 0, "enables \"..\" components in path lookup in capability mode " "on non-local mount"); static void nameicap_tracker_add(struct nameidata *ndp, struct vnode *dp) { struct nameicap_tracker *nt; if ((ndp->ni_lcf & NI_LCF_CAP_DOTDOT) == 0 || dp->v_type != VDIR) return; nt = uma_zalloc(nt_zone, M_WAITOK); vhold(dp); nt->dp = dp; TAILQ_INSERT_TAIL(&ndp->ni_cap_tracker, nt, nm_link); } static void nameicap_cleanup(struct nameidata *ndp) { struct nameicap_tracker *nt, *nt1; KASSERT(TAILQ_EMPTY(&ndp->ni_cap_tracker) || (ndp->ni_lcf & NI_LCF_CAP_DOTDOT) != 0, ("not strictrelative")); TAILQ_FOREACH_SAFE(nt, &ndp->ni_cap_tracker, nm_link, nt1) { TAILQ_REMOVE(&ndp->ni_cap_tracker, nt, nm_link); vdrop(nt->dp); uma_zfree(nt_zone, nt); } } /* * For dotdot lookups in capability mode, only allow the component * lookup to succeed if the resulting directory was already traversed * during the operation. Also fail dotdot lookups for non-local * filesystems, where external agents might assist local lookups to * escape the compartment. 
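 * For example, a lookup of "a/b/../c" relative to a capability succeeds:
 * the walk recorded "a" in ni_cap_tracker before descending into "b", so
 * when ".." resolves back to "a" the scan below finds a match. A lookup
 * of "../x" fails with ENOTCAPABLE, since the parent of the starting
 * directory was never traversed.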
*/ static int nameicap_check_dotdot(struct nameidata *ndp, struct vnode *dp) { struct nameicap_tracker *nt; struct mount *mp; if ((ndp->ni_lcf & NI_LCF_CAP_DOTDOT) == 0 || dp == NULL || dp->v_type != VDIR) return (0); mp = dp->v_mount; if (lookup_cap_dotdot_nonlocal == 0 && mp != NULL && (mp->mnt_flag & MNT_LOCAL) == 0) return (ENOTCAPABLE); TAILQ_FOREACH_REVERSE(nt, &ndp->ni_cap_tracker, nameicap_tracker_head, nm_link) { if (dp == nt->dp) return (0); } return (ENOTCAPABLE); } static void namei_cleanup_cnp(struct componentname *cnp) { uma_zfree(namei_zone, cnp->cn_pnbuf); #ifdef DIAGNOSTIC cnp->cn_pnbuf = NULL; cnp->cn_nameptr = NULL; #endif } static int namei_handle_root(struct nameidata *ndp, struct vnode **dpp) { struct componentname *cnp; cnp = &ndp->ni_cnd; if ((ndp->ni_lcf & NI_LCF_STRICTRELATIVE) != 0) { #ifdef KTRACE if (KTRPOINT(curthread, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_LOOKUP, NULL, NULL); #endif return (ENOTCAPABLE); } while (*(cnp->cn_nameptr) == '/') { cnp->cn_nameptr++; ndp->ni_pathlen--; } *dpp = ndp->ni_rootdir; vrefact(*dpp); return (0); } /* * Convert a pathname into a pointer to a locked vnode. * * The FOLLOW flag is set when symbolic links are to be followed * when they occur at the end of the name translation process. * Symbolic links are always followed for all other pathname * components other than the last. * * The segflg defines whether the name is to be copied from user * space or kernel space. * * Overall outline of namei: * * copy in name * get starting directory * while (!done && !error) { * call lookup to search path. * if symbolic link, massage name in buffer and continue * } */ int namei(struct nameidata *ndp) { struct filedesc *fdp; /* pointer to file descriptor state */ char *cp; /* pointer into pathname argument */ struct vnode *dp; /* the directory we are searching */ struct iovec aiov; /* uio for reading symbolic links */ struct componentname *cnp; struct thread *td; struct proc *p; cap_rights_t rights; struct uio auio; int error, linklen, startdir_used; cnp = &ndp->ni_cnd; td = cnp->cn_thread; p = td->td_proc; ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_thread->td_ucred; KASSERT(cnp->cn_cred && p, ("namei: bad cred/proc")); KASSERT((cnp->cn_nameiop & (~OPMASK)) == 0, ("namei: nameiop contaminated with flags")); KASSERT((cnp->cn_flags & OPMASK) == 0, ("namei: flags contaminated with nameiops")); MPASS(ndp->ni_startdir == NULL || ndp->ni_startdir->v_type == VDIR || ndp->ni_startdir->v_type == VBAD); if (!lookup_shared) cnp->cn_flags &= ~LOCKSHARED; fdp = p->p_fd; TAILQ_INIT(&ndp->ni_cap_tracker); ndp->ni_lcf = 0; /* We will set this ourselves if we need it. */ cnp->cn_flags &= ~TRAILINGSLASH; /* * Get a buffer for the name to be translated, and copy the * name into the buffer. */ if ((cnp->cn_flags & HASBUF) == 0) cnp->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK); if (ndp->ni_segflg == UIO_SYSSPACE) error = copystr(ndp->ni_dirp, cnp->cn_pnbuf, MAXPATHLEN, &ndp->ni_pathlen); else error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf, MAXPATHLEN, &ndp->ni_pathlen); /* * Don't allow empty pathnames. */ if (error == 0 && *cnp->cn_pnbuf == '\0') error = ENOENT; #ifdef CAPABILITY_MODE /* * In capability mode, lookups must be restricted to happen in * the subtree with the root specified by the file descriptor: * - The root must be real file descriptor, not the pseudo-descriptor * AT_FDCWD. * - The passed path must be relative and not absolute. * - If lookup_cap_dotdot is disabled, path must not contain the * '..' components. 
* - If lookup_cap_dotdot is enabled, we verify that all '..' * components lookups result in the directories which were * previously walked by us, which prevents an escape from * the relative root. */ if (error == 0 && IN_CAPABILITY_MODE(td) && (cnp->cn_flags & NOCAPCHECK) == 0) { ndp->ni_lcf |= NI_LCF_STRICTRELATIVE; if (ndp->ni_dirfd == AT_FDCWD) { #ifdef KTRACE if (KTRPOINT(td, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_LOOKUP, NULL, NULL); #endif error = ECAPMODE; } } #endif if (error != 0) { namei_cleanup_cnp(cnp); ndp->ni_vp = NULL; return (error); } ndp->ni_loopcnt = 0; #ifdef KTRACE if (KTRPOINT(td, KTR_NAMEI)) { KASSERT(cnp->cn_thread == curthread, ("namei not using curthread")); ktrnamei(cnp->cn_pnbuf); } #endif /* * Get starting point for the translation. */ FILEDESC_SLOCK(fdp); ndp->ni_rootdir = fdp->fd_rdir; vrefact(ndp->ni_rootdir); ndp->ni_topdir = fdp->fd_jdir; /* * If we are auditing the kernel pathname, save the user pathname. */ if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_UPATH1(td, ndp->ni_dirfd, cnp->cn_pnbuf); if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_UPATH2(td, ndp->ni_dirfd, cnp->cn_pnbuf); startdir_used = 0; dp = NULL; cnp->cn_nameptr = cnp->cn_pnbuf; if (cnp->cn_pnbuf[0] == '/') { error = namei_handle_root(ndp, &dp); } else { if (ndp->ni_startdir != NULL) { dp = ndp->ni_startdir; startdir_used = 1; } else if (ndp->ni_dirfd == AT_FDCWD) { dp = fdp->fd_cdir; vrefact(dp); } else { rights = ndp->ni_rightsneeded; cap_rights_set(&rights, CAP_LOOKUP); if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_ATFD1(ndp->ni_dirfd); if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_ATFD2(ndp->ni_dirfd); error = fgetvp_rights(td, ndp->ni_dirfd, &rights, &ndp->ni_filecaps, &dp); if (error == EINVAL) error = ENOTDIR; #ifdef CAPABILITIES /* * If file descriptor doesn't have all rights, * all lookups relative to it must also be * strictly relative. */ CAP_ALL(&rights); if (!cap_rights_contains(&ndp->ni_filecaps.fc_rights, &rights) || ndp->ni_filecaps.fc_fcntls != CAP_FCNTL_ALL || ndp->ni_filecaps.fc_nioctls != -1) { ndp->ni_lcf |= NI_LCF_STRICTRELATIVE; } #endif } if (error == 0 && dp->v_type != VDIR) error = ENOTDIR; } FILEDESC_SUNLOCK(fdp); if (ndp->ni_startdir != NULL && !startdir_used) vrele(ndp->ni_startdir); if (error != 0) { if (dp != NULL) vrele(dp); goto out; } if ((ndp->ni_lcf & NI_LCF_STRICTRELATIVE) != 0 && lookup_cap_dotdot != 0) ndp->ni_lcf |= NI_LCF_CAP_DOTDOT; SDT_PROBE3(vfs, namei, lookup, entry, dp, cnp->cn_pnbuf, cnp->cn_flags); for (;;) { ndp->ni_startdir = dp; error = lookup(ndp); if (error != 0) goto out; /* * If not a symbolic link, we're done. 
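 * Otherwise the link text replaces the component just consumed: for a
 * remaining path "L/b" where L is a symlink to "t", the target "t" is
 * read into a buffer, the unconsumed "/b" is copied in behind it, and
 * the walk restarts on "t/b" from the link's parent directory (or from
 * the root, if the target begins with '/').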
*/ if ((cnp->cn_flags & ISSYMLINK) == 0) { vrele(ndp->ni_rootdir); if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0) { namei_cleanup_cnp(cnp); } else cnp->cn_flags |= HASBUF; nameicap_cleanup(ndp); SDT_PROBE2(vfs, namei, lookup, return, 0, ndp->ni_vp); return (0); } if (ndp->ni_loopcnt++ >= MAXSYMLINKS) { error = ELOOP; break; } #ifdef MAC if ((cnp->cn_flags & NOMACCHECK) == 0) { error = mac_vnode_check_readlink(td->td_ucred, ndp->ni_vp); if (error != 0) break; } #endif if (ndp->ni_pathlen > 1) cp = uma_zalloc(namei_zone, M_WAITOK); else cp = cnp->cn_pnbuf; aiov.iov_base = cp; aiov.iov_len = MAXPATHLEN; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = 0; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_resid = MAXPATHLEN; error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred); if (error != 0) { if (ndp->ni_pathlen > 1) uma_zfree(namei_zone, cp); break; } linklen = MAXPATHLEN - auio.uio_resid; if (linklen == 0) { if (ndp->ni_pathlen > 1) uma_zfree(namei_zone, cp); error = ENOENT; break; } if (linklen + ndp->ni_pathlen >= MAXPATHLEN) { if (ndp->ni_pathlen > 1) uma_zfree(namei_zone, cp); error = ENAMETOOLONG; break; } if (ndp->ni_pathlen > 1) { bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen); uma_zfree(namei_zone, cnp->cn_pnbuf); cnp->cn_pnbuf = cp; } else cnp->cn_pnbuf[linklen] = '\0'; ndp->ni_pathlen += linklen; vput(ndp->ni_vp); dp = ndp->ni_dvp; /* * Check if root directory should replace current directory. */ cnp->cn_nameptr = cnp->cn_pnbuf; if (*(cnp->cn_nameptr) == '/') { vrele(dp); error = namei_handle_root(ndp, &dp); if (error != 0) goto out; } } vput(ndp->ni_vp); ndp->ni_vp = NULL; vrele(ndp->ni_dvp); out: vrele(ndp->ni_rootdir); namei_cleanup_cnp(cnp); nameicap_cleanup(ndp); SDT_PROBE2(vfs, namei, lookup, return, error, NULL); return (error); } static int compute_cn_lkflags(struct mount *mp, int lkflags, int cnflags) { if (mp == NULL || ((lkflags & LK_SHARED) && (!(mp->mnt_kern_flag & MNTK_LOOKUP_SHARED) || ((cnflags & ISDOTDOT) && (mp->mnt_kern_flag & MNTK_LOOKUP_EXCL_DOTDOT))))) { lkflags &= ~LK_SHARED; lkflags |= LK_EXCLUSIVE; } lkflags |= LK_NODDLKTREAT; return (lkflags); } static __inline int needs_exclusive_leaf(struct mount *mp, int flags) { /* * Intermediate nodes can use shared locks, we only need to * force an exclusive lock for leaf nodes. */ if ((flags & (ISLASTCN | LOCKLEAF)) != (ISLASTCN | LOCKLEAF)) return (0); /* Always use exclusive locks if LOCKSHARED isn't set. */ if (!(flags & LOCKSHARED)) return (1); /* * For lookups during open(), if the mount point supports * extended shared operations, then use a shared lock for the * leaf node, otherwise use an exclusive lock. */ if ((flags & ISOPEN) != 0) return (!MNT_EXTENDED_SHARED(mp)); /* * Lookup requests outside of open() that specify LOCKSHARED * only need a shared lock on the leaf vnode. */ return (0); } /* * Search a pathname. * This is a very central and rather complicated routine. * * The pathname is pointed to by ni_ptr and is of length ni_pathlen. * The starting directory is taken from ni_startdir. The pathname is * descended until done, or a symbolic link is encountered. The variable * ni_more is clear if the path is completed; it is set to one if a * symbolic link needing interpretation is encountered. * * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on * whether the name is to be looked up, created, renamed, or deleted. 
* When CREATE, RENAME, or DELETE is specified, information usable in * creating, renaming, or deleting a directory entry may be calculated. * If flag has LOCKPARENT or'ed into it, the parent directory is returned * locked. If flag has WANTPARENT or'ed into it, the parent directory is * returned unlocked. Otherwise the parent directory is not returned. If * the target of the pathname exists and LOCKLEAF is or'ed into the flag * the target is returned locked, otherwise it is returned unlocked. * When creating or renaming and LOCKPARENT is specified, the target may not * be ".". When deleting and LOCKPARENT is specified, the target may be ".". * * Overall outline of lookup: * * dirloop: * identify next component of name at ndp->ni_ptr * handle degenerate case where name is null string * if .. and crossing mount points and on mounted filesys, find parent * call VOP_LOOKUP routine for next component name * directory vnode returned in ni_dvp, unlocked unless LOCKPARENT set * component vnode returned in ni_vp (if it exists), locked. * if result vnode is mounted on and crossing mount points, * find mounted on vnode * if more components of name, do next level at dirloop * return the answer in ni_vp, locked if LOCKLEAF set * if LOCKPARENT set, return locked parent in ni_dvp * if WANTPARENT set, return unlocked parent in ni_dvp */ int lookup(struct nameidata *ndp) { char *cp; /* pointer into pathname argument */ char *prev_ni_next; /* saved ndp->ni_next */ struct vnode *dp = NULL; /* the directory we are searching */ struct vnode *tdp; /* saved dp */ struct mount *mp; /* mount table entry */ struct prison *pr; size_t prev_ni_pathlen; /* saved ndp->ni_pathlen */ int docache; /* == 0 do not cache last component */ int wantparent; /* 1 => wantparent or lockparent flag */ int rdonly; /* lookup read-only flag bit */ int error = 0; int dpunlocked = 0; /* dp has already been unlocked */ int relookup = 0; /* do not consume the path component */ struct componentname *cnp = &ndp->ni_cnd; int lkflags_save; int ni_dvp_unlocked; /* * Setup: break out flag bits into variables. */ ni_dvp_unlocked = 0; wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT); KASSERT(cnp->cn_nameiop == LOOKUP || wantparent, ("CREATE, DELETE, RENAME require LOCKPARENT or WANTPARENT.")); docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE; if (cnp->cn_nameiop == DELETE || (wantparent && cnp->cn_nameiop != CREATE && cnp->cn_nameiop != LOOKUP)) docache = 0; rdonly = cnp->cn_flags & RDONLY; cnp->cn_flags &= ~ISSYMLINK; ndp->ni_dvp = NULL; /* * We use shared locks until we hit the parent of the last cn then * we adjust based on the requesting flags. */ if (lookup_shared) cnp->cn_lkflags = LK_SHARED; else cnp->cn_lkflags = LK_EXCLUSIVE; dp = ndp->ni_startdir; ndp->ni_startdir = NULLVP; vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY, cnp->cn_flags)); dirloop: /* * Search a new directory. * * The last component of the filename is left accessible via * cnp->cn_nameptr for callers that need the name. Callers needing * the name set the SAVENAME flag. When done, they assume * responsibility for freeing the pathname buffer. 
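 * For example, with "src//sys/" left to translate, this pass isolates
 * "src" (cn_namelen = 3), collapses the doubled slash, and leaves ni_next
 * at the slash before "sys/"; when the final component "sys/" is reached,
 * its trailing slash is overwritten with a nul and remembered in
 * TRAILINGSLASH instead.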
*/ for (cp = cnp->cn_nameptr; *cp != 0 && *cp != '/'; cp++) continue; cnp->cn_namelen = cp - cnp->cn_nameptr; if (cnp->cn_namelen > NAME_MAX) { error = ENAMETOOLONG; goto bad; } #ifdef NAMEI_DIAGNOSTIC { char c = *cp; *cp = '\0'; printf("{%s}: ", cnp->cn_nameptr); *cp = c; } #endif prev_ni_pathlen = ndp->ni_pathlen; ndp->ni_pathlen -= cnp->cn_namelen; KASSERT(ndp->ni_pathlen <= PATH_MAX, ("%s: ni_pathlen underflow to %zd\n", __func__, ndp->ni_pathlen)); prev_ni_next = ndp->ni_next; ndp->ni_next = cp; /* * Replace multiple slashes by a single slash and trailing slashes * by a null. This must be done before VOP_LOOKUP() because some * fs's don't know about trailing slashes. Remember if there were * trailing slashes to handle symlinks, existing non-directories * and non-existing files that won't be directories specially later. */ while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) { cp++; ndp->ni_pathlen--; if (*cp == '\0') { *ndp->ni_next = '\0'; cnp->cn_flags |= TRAILINGSLASH; } } ndp->ni_next = cp; cnp->cn_flags |= MAKEENTRY; if (*cp == '\0' && docache == 0) cnp->cn_flags &= ~MAKEENTRY; if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') cnp->cn_flags |= ISDOTDOT; else cnp->cn_flags &= ~ISDOTDOT; if (*ndp->ni_next == 0) cnp->cn_flags |= ISLASTCN; else cnp->cn_flags &= ~ISLASTCN; if ((cnp->cn_flags & ISLASTCN) != 0 && cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.' && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { error = EINVAL; goto bad; } nameicap_tracker_add(ndp, dp); /* * Check for degenerate name (e.g. / or "") * which is a way of talking about a directory, * e.g. like "/." or ".". */ if (cnp->cn_nameptr[0] == '\0') { if (dp->v_type != VDIR) { error = ENOTDIR; goto bad; } if (cnp->cn_nameiop != LOOKUP) { error = EISDIR; goto bad; } if (wantparent) { ndp->ni_dvp = dp; VREF(dp); } ndp->ni_vp = dp; if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_VNODE1(dp); else if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_VNODE2(dp); if (!(cnp->cn_flags & (LOCKPARENT | LOCKLEAF))) VOP_UNLOCK(dp, 0); /* XXX This should probably move to the top of function. */ if (cnp->cn_flags & SAVESTART) panic("lookup: SAVESTART"); goto success; } /* * Handle "..": five special cases. * 0. If doing a capability lookup and lookup_cap_dotdot is * disabled, return ENOTCAPABLE. * 1. Return an error if this is the last component of * the name and the operation is DELETE or RENAME. * 2. If at root directory (e.g. after chroot) * or at absolute root directory * then ignore it so can't get out. * 3. If this vnode is the root of a mounted * filesystem, then replace it with the * vnode which was mounted on so we take the * .. in the other filesystem. * 4. If the vnode is the top directory of * the jail or chroot, don't let them out. * 5. If doing a capability lookup and lookup_cap_dotdot is * enabled, return ENOTCAPABLE if the lookup would escape * from the initial file descriptor directory. Checks are * done by ensuring that namei() already traversed the * result of dotdot lookup. 
*/ if (cnp->cn_flags & ISDOTDOT) { if ((ndp->ni_lcf & (NI_LCF_STRICTRELATIVE | NI_LCF_CAP_DOTDOT)) == NI_LCF_STRICTRELATIVE) { #ifdef KTRACE if (KTRPOINT(curthread, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_LOOKUP, NULL, NULL); #endif error = ENOTCAPABLE; goto bad; } if ((cnp->cn_flags & ISLASTCN) != 0 && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { error = EINVAL; goto bad; } for (;;) { for (pr = cnp->cn_cred->cr_prison; pr != NULL; pr = pr->pr_parent) if (dp == pr->pr_root) break; if (dp == ndp->ni_rootdir || dp == ndp->ni_topdir || dp == rootvnode || pr != NULL || ((dp->v_vflag & VV_ROOT) != 0 && (cnp->cn_flags & NOCROSSMOUNT) != 0)) { ndp->ni_dvp = dp; ndp->ni_vp = dp; VREF(dp); goto nextname; } if ((dp->v_vflag & VV_ROOT) == 0) break; if (dp->v_iflag & VI_DOOMED) { /* forced unmount */ error = ENOENT; goto bad; } tdp = dp; dp = dp->v_mount->mnt_vnodecovered; VREF(dp); vput(tdp); vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY, ISDOTDOT)); error = nameicap_check_dotdot(ndp, dp); if (error != 0) { #ifdef KTRACE if (KTRPOINT(curthread, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_LOOKUP, NULL, NULL); #endif goto bad; } } } /* * We now have a segment name to search for, and a directory to search. */ unionlookup: #ifdef MAC if ((cnp->cn_flags & NOMACCHECK) == 0) { error = mac_vnode_check_lookup(cnp->cn_thread->td_ucred, dp, cnp); if (error) goto bad; } #endif ndp->ni_dvp = dp; ndp->ni_vp = NULL; ASSERT_VOP_LOCKED(dp, "lookup"); /* * If we have a shared lock we may need to upgrade the lock for the * last operation. */ if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN) && dp != vp_crossmp && VOP_ISLOCKED(dp) == LK_SHARED) vn_lock(dp, LK_UPGRADE|LK_RETRY); if ((dp->v_iflag & VI_DOOMED) != 0) { error = ENOENT; goto bad; } /* * If we're looking up the last component and we need an exclusive * lock, adjust our lkflags. */ if (needs_exclusive_leaf(dp->v_mount, cnp->cn_flags)) cnp->cn_lkflags = LK_EXCLUSIVE; #ifdef NAMEI_DIAGNOSTIC vn_printf(dp, "lookup in "); #endif lkflags_save = cnp->cn_lkflags; cnp->cn_lkflags = compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags, cnp->cn_flags); error = VOP_LOOKUP(dp, &ndp->ni_vp, cnp); cnp->cn_lkflags = lkflags_save; if (error != 0) { KASSERT(ndp->ni_vp == NULL, ("leaf should be empty")); #ifdef NAMEI_DIAGNOSTIC printf("not found\n"); #endif if ((error == ENOENT) && (dp->v_vflag & VV_ROOT) && (dp->v_mount != NULL) && (dp->v_mount->mnt_flag & MNT_UNION)) { tdp = dp; dp = dp->v_mount->mnt_vnodecovered; VREF(dp); vput(tdp); vn_lock(dp, compute_cn_lkflags(dp->v_mount, cnp->cn_lkflags | LK_RETRY, cnp->cn_flags)); nameicap_tracker_add(ndp, dp); goto unionlookup; } if (error == ERELOOKUP) { vref(dp); ndp->ni_vp = dp; error = 0; relookup = 1; goto good; } if (error != EJUSTRETURN) goto bad; /* * At this point, we know we're at the end of the * pathname. If creating / renaming, we can consider * allowing the file or directory to be created / renamed, * provided we're not on a read-only filesystem. */ if (rdonly) { error = EROFS; goto bad; } /* trailing slash only allowed for directories */ if ((cnp->cn_flags & TRAILINGSLASH) && !(cnp->cn_flags & WILLBEDIR)) { error = ENOENT; goto bad; } if ((cnp->cn_flags & LOCKPARENT) == 0) VOP_UNLOCK(dp, 0); /* * We return with ni_vp NULL to indicate that the entry * doesn't currently exist, leaving a pointer to the * (possibly locked) directory vnode in ndp->ni_dvp. 
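 * This is the path taken by open(2) with O_CREAT for a name that does
 * not exist yet: VOP_LOOKUP() returned EJUSTRETURN above, and the caller
 * will go on to hand ni_dvp to VOP_CREATE().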
*/ if (cnp->cn_flags & SAVESTART) { ndp->ni_startdir = ndp->ni_dvp; VREF(ndp->ni_startdir); } goto success; } good: #ifdef NAMEI_DIAGNOSTIC printf("found\n"); #endif dp = ndp->ni_vp; /* * Check to see if the vnode has been mounted on; * if so find the root of the mounted filesystem. */ while (dp->v_type == VDIR && (mp = dp->v_mountedhere) && (cnp->cn_flags & NOCROSSMOUNT) == 0) { if (vfs_busy(mp, 0)) continue; vput(dp); if (dp != ndp->ni_dvp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); vrefact(vp_crossmp); ndp->ni_dvp = vp_crossmp; error = VFS_ROOT(mp, compute_cn_lkflags(mp, cnp->cn_lkflags, cnp->cn_flags), &tdp); vfs_unbusy(mp); if (vn_lock(vp_crossmp, LK_SHARED | LK_NOWAIT)) panic("vp_crossmp exclusively locked or reclaimed"); if (error) { dpunlocked = 1; goto bad2; } ndp->ni_vp = dp = tdp; } /* * Check for symbolic link */ if ((dp->v_type == VLNK) && ((cnp->cn_flags & FOLLOW) || (cnp->cn_flags & TRAILINGSLASH) || *ndp->ni_next == '/')) { cnp->cn_flags |= ISSYMLINK; if (dp->v_iflag & VI_DOOMED) { /* * We can't know whether the directory was mounted with * NOSYMFOLLOW, so we can't follow safely. */ error = ENOENT; goto bad2; } if (dp->v_mount->mnt_flag & MNT_NOSYMFOLLOW) { error = EACCES; goto bad2; } /* * Symlink code always expects an unlocked dvp. */ if (ndp->ni_dvp != ndp->ni_vp) { VOP_UNLOCK(ndp->ni_dvp, 0); ni_dvp_unlocked = 1; } goto success; } nextname: /* * Not a symbolic link that we will follow. Continue with the * next component if there is any; otherwise, we're done. */ KASSERT((cnp->cn_flags & ISLASTCN) || *ndp->ni_next == '/', ("lookup: invalid path state.")); if (relookup) { relookup = 0; ndp->ni_pathlen = prev_ni_pathlen; ndp->ni_next = prev_ni_next; if (ndp->ni_dvp != dp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); goto dirloop; } if (cnp->cn_flags & ISDOTDOT) { error = nameicap_check_dotdot(ndp, ndp->ni_vp); if (error != 0) { #ifdef KTRACE if (KTRPOINT(curthread, KTR_CAPFAIL)) ktrcapfail(CAPFAIL_LOOKUP, NULL, NULL); #endif goto bad2; } } if (*ndp->ni_next == '/') { cnp->cn_nameptr = ndp->ni_next; while (*cnp->cn_nameptr == '/') { cnp->cn_nameptr++; ndp->ni_pathlen--; } if (ndp->ni_dvp != dp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); goto dirloop; } /* * If we're processing a path with a trailing slash, * check that the end result is a directory. */ if ((cnp->cn_flags & TRAILINGSLASH) && dp->v_type != VDIR) { error = ENOTDIR; goto bad2; } /* * Disallow directory write attempts on read-only filesystems. */ if (rdonly && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { error = EROFS; goto bad2; } if (cnp->cn_flags & SAVESTART) { ndp->ni_startdir = ndp->ni_dvp; VREF(ndp->ni_startdir); } if (!wantparent) { ni_dvp_unlocked = 2; if (ndp->ni_dvp != dp) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); } else if ((cnp->cn_flags & LOCKPARENT) == 0 && ndp->ni_dvp != dp) { VOP_UNLOCK(ndp->ni_dvp, 0); ni_dvp_unlocked = 1; } if (cnp->cn_flags & AUDITVNODE1) AUDIT_ARG_VNODE1(dp); else if (cnp->cn_flags & AUDITVNODE2) AUDIT_ARG_VNODE2(dp); if ((cnp->cn_flags & LOCKLEAF) == 0) VOP_UNLOCK(dp, 0); success: /* * Because of lookup_shared we may have the vnode shared locked, but * the caller may want it to be exclusively locked. 
*/ if (needs_exclusive_leaf(dp->v_mount, cnp->cn_flags) && VOP_ISLOCKED(dp) != LK_EXCLUSIVE) { vn_lock(dp, LK_UPGRADE | LK_RETRY); if (dp->v_iflag & VI_DOOMED) { error = ENOENT; goto bad2; } } return (0); bad2: if (ni_dvp_unlocked != 2) { if (dp != ndp->ni_dvp && !ni_dvp_unlocked) vput(ndp->ni_dvp); else vrele(ndp->ni_dvp); } bad: if (!dpunlocked) vput(dp); ndp->ni_vp = NULL; return (error); } /* * relookup - lookup a path name component * Used by lookup to re-acquire things. */ int relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) { struct vnode *dp = NULL; /* the directory we are searching */ int wantparent; /* 1 => wantparent or lockparent flag */ int rdonly; /* lookup read-only flag bit */ int error = 0; KASSERT(cnp->cn_flags & ISLASTCN, ("relookup: Not given last component.")); /* * Setup: break out flag bits into variables. */ wantparent = cnp->cn_flags & (LOCKPARENT|WANTPARENT); KASSERT(wantparent, ("relookup: parent not wanted.")); rdonly = cnp->cn_flags & RDONLY; cnp->cn_flags &= ~ISSYMLINK; dp = dvp; cnp->cn_lkflags = LK_EXCLUSIVE; vn_lock(dp, LK_EXCLUSIVE | LK_RETRY); /* * Search a new directory. * * The last component of the filename is left accessible via * cnp->cn_nameptr for callers that need the name. Callers needing * the name set the SAVENAME flag. When done, they assume * responsibility for freeing the pathname buffer. */ #ifdef NAMEI_DIAGNOSTIC printf("{%s}: ", cnp->cn_nameptr); #endif /* * Check for "" which represents the root directory after slash * removal. */ if (cnp->cn_nameptr[0] == '\0') { /* * Support only LOOKUP for "/" because lookup() * can't succeed for CREATE, DELETE and RENAME. */ KASSERT(cnp->cn_nameiop == LOOKUP, ("nameiop must be LOOKUP")); KASSERT(dp->v_type == VDIR, ("dp is not a directory")); if (!(cnp->cn_flags & LOCKLEAF)) VOP_UNLOCK(dp, 0); *vpp = dp; /* XXX This should probably move to the top of function. */ if (cnp->cn_flags & SAVESTART) panic("lookup: SAVESTART"); return (0); } if (cnp->cn_flags & ISDOTDOT) panic ("relookup: lookup on dot-dot"); /* * We now have a segment name to search for, and a directory to search. */ #ifdef NAMEI_DIAGNOSTIC vn_printf(dp, "search in "); #endif if ((error = VOP_LOOKUP(dp, vpp, cnp)) != 0) { KASSERT(*vpp == NULL, ("leaf should be empty")); if (error != EJUSTRETURN) goto bad; /* * If creating and at end of pathname, then can consider * allowing file to be created. */ if (rdonly) { error = EROFS; goto bad; } /* ASSERT(dvp == ndp->ni_startdir) */ if (cnp->cn_flags & SAVESTART) VREF(dvp); if ((cnp->cn_flags & LOCKPARENT) == 0) VOP_UNLOCK(dp, 0); /* * We return with ni_vp NULL to indicate that the entry * doesn't currently exist, leaving a pointer to the * (possibly locked) directory vnode in ndp->ni_dvp. */ return (0); } dp = *vpp; /* * Disallow directory write attempts on read-only filesystems. */ if (rdonly && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) { if (dvp == dp) vrele(dvp); else vput(dvp); error = EROFS; goto bad; } /* * Set the parent lock/ref state to the requested state. 
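 * That is: with LOCKPARENT, dvp stays locked and referenced; with
 * WANTPARENT alone it stays referenced but is unlocked; with neither
 * flag, the reference is dropped as well.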
*/ if ((cnp->cn_flags & LOCKPARENT) == 0 && dvp != dp) { if (wantparent) VOP_UNLOCK(dvp, 0); else vput(dvp); } else if (!wantparent) vrele(dvp); /* * Check for symbolic link */ KASSERT(dp->v_type != VLNK || !(cnp->cn_flags & FOLLOW), ("relookup: symlink found.\n")); /* ASSERT(dvp == ndp->ni_startdir) */ if (cnp->cn_flags & SAVESTART) VREF(dvp); if ((cnp->cn_flags & LOCKLEAF) == 0) VOP_UNLOCK(dp, 0); return (0); bad: vput(dp); *vpp = NULL; return (error); } void NDINIT_ALL(struct nameidata *ndp, u_long op, u_long flags, enum uio_seg segflg, const char *namep, int dirfd, struct vnode *startdir, cap_rights_t *rightsp, struct thread *td) { ndp->ni_cnd.cn_nameiop = op; ndp->ni_cnd.cn_flags = flags; ndp->ni_segflg = segflg; ndp->ni_dirp = namep; ndp->ni_dirfd = dirfd; ndp->ni_startdir = startdir; if (rightsp != NULL) ndp->ni_rightsneeded = *rightsp; else cap_rights_init(&ndp->ni_rightsneeded); filecaps_init(&ndp->ni_filecaps); ndp->ni_cnd.cn_thread = td; } /* * Free data allocated by namei(); see namei(9) for details. */ void NDFREE(struct nameidata *ndp, const u_int flags) { int unlock_dvp; int unlock_vp; unlock_dvp = 0; unlock_vp = 0; if (!(flags & NDF_NO_FREE_PNBUF) && (ndp->ni_cnd.cn_flags & HASBUF)) { uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); ndp->ni_cnd.cn_flags &= ~HASBUF; } if (!(flags & NDF_NO_VP_UNLOCK) && (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp) unlock_vp = 1; if (!(flags & NDF_NO_VP_RELE) && ndp->ni_vp) { if (unlock_vp) { vput(ndp->ni_vp); unlock_vp = 0; } else vrele(ndp->ni_vp); ndp->ni_vp = NULL; } if (unlock_vp) VOP_UNLOCK(ndp->ni_vp, 0); if (!(flags & NDF_NO_DVP_UNLOCK) && (ndp->ni_cnd.cn_flags & LOCKPARENT) && ndp->ni_dvp != ndp->ni_vp) unlock_dvp = 1; if (!(flags & NDF_NO_DVP_RELE) && (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) { if (unlock_dvp) { vput(ndp->ni_dvp); unlock_dvp = 0; } else vrele(ndp->ni_dvp); ndp->ni_dvp = NULL; } if (unlock_dvp) VOP_UNLOCK(ndp->ni_dvp, 0); if (!(flags & NDF_NO_STARTDIR_RELE) && (ndp->ni_cnd.cn_flags & SAVESTART)) { vrele(ndp->ni_startdir); ndp->ni_startdir = NULL; } } /* * Determine if there is a suitable alternate filename under the specified * prefix for the specified path. If the create flag is set, then the * alternate prefix will be used so long as the parent directory exists. * This is used by the various compatibility ABIs so that Linux binaries prefer * files under /compat/linux for example. The chosen path (whether under * the prefix or under /) is returned in a kernel malloc'd buffer pointed * to by pathbuf. The caller is responsible for free'ing the buffer from * the M_TEMP bucket if one is returned. */ int kern_alternate_path(struct thread *td, const char *prefix, const char *path, enum uio_seg pathseg, char **pathbuf, int create, int dirfd) { struct nameidata nd, ndroot; char *ptr, *buf, *cp; size_t len, sz; int error; buf = (char *) malloc(MAXPATHLEN, M_TEMP, M_WAITOK); *pathbuf = buf; /* Copy the prefix into the new pathname as a starting point. */ len = strlcpy(buf, prefix, MAXPATHLEN); if (len >= MAXPATHLEN) { *pathbuf = NULL; free(buf, M_TEMP); return (EINVAL); } sz = MAXPATHLEN - len; ptr = buf + len; /* Append the filename to the prefix. */ if (pathseg == UIO_SYSSPACE) error = copystr(path, ptr, sz, &len); else error = copyinstr(path, ptr, sz, &len); if (error) { *pathbuf = NULL; free(buf, M_TEMP); return (error); } /* Only use a prefix with absolute pathnames. 
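 * For example, given the prefix "/compat/linux" and the path
 * "/etc/passwd", the candidate "/compat/linux/etc/passwd" is tried
 * first; the plain "/etc/passwd" is kept only if that lookup fails or
 * resolves to the emulation root itself.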
*/ if (*ptr != '/') { error = EINVAL; goto keeporig; } if (dirfd != AT_FDCWD) { /* * We want the original because the "prefix" is * included in the already opened dirfd. */ bcopy(ptr, buf, len); return (0); } /* * We know that there is a / somewhere in this pathname. * Search backwards for it, to find the file's parent dir * to see if it exists in the alternate tree. If it does * and we want to create a file (the create flag is set), we * don't need to worry about the root comparison in this case. */ if (create) { for (cp = &ptr[len] - 1; *cp != '/'; cp--); *cp = '\0'; NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, buf, td); error = namei(&nd); *cp = '/'; if (error != 0) goto keeporig; } else { NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, buf, td); error = namei(&nd); if (error != 0) goto keeporig; /* * We now compare the vnode of the prefix to the vnode * of the requested path. If they resolve to be the same, * then we ignore the match so that the real root gets used. * This avoids the problem of traversing "../.." to find the * root directory and never finding it, because "/" resolves * to the emulation root directory. This is expensive :-( */ NDINIT(&ndroot, LOOKUP, FOLLOW, UIO_SYSSPACE, prefix, td); /* We shouldn't ever get an error from this namei(). */ error = namei(&ndroot); if (error == 0) { if (nd.ni_vp == ndroot.ni_vp) error = ENOENT; NDFREE(&ndroot, NDF_ONLY_PNBUF); vrele(ndroot.ni_vp); } } NDFREE(&nd, NDF_ONLY_PNBUF); vrele(nd.ni_vp); keeporig: /* If there was an error, use the original path name. */ if (error) bcopy(ptr, buf, len); return (error); } Index: projects/clang400-import/sys/kern/vfs_vnops.c =================================================================== --- projects/clang400-import/sys/kern/vfs_vnops.c (revision 312719) +++ projects/clang400-import/sys/kern/vfs_vnops.c (revision 312720) @@ -1,2476 +1,2470 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Copyright (c) 2012 Konstantin Belousov * Copyright (c) 2013, 2014 The FreeBSD Foundation * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif static fo_rdwr_t vn_read; static fo_rdwr_t vn_write; static fo_rdwr_t vn_io_fault; static fo_truncate_t vn_truncate; static fo_ioctl_t vn_ioctl; static fo_poll_t vn_poll; static fo_kqfilter_t vn_kqfilter; static fo_stat_t vn_statfile; static fo_close_t vn_closefile; static fo_mmap_t vn_mmap; struct fileops vnops = { .fo_read = vn_io_fault, .fo_write = vn_io_fault, .fo_truncate = vn_truncate, .fo_ioctl = vn_ioctl, .fo_poll = vn_poll, .fo_kqfilter = vn_kqfilter, .fo_stat = vn_statfile, .fo_close = vn_closefile, .fo_chmod = vn_chmod, .fo_chown = vn_chown, .fo_sendfile = vn_sendfile, .fo_seek = vn_seek, .fo_fill_kinfo = vn_fill_kinfo, .fo_mmap = vn_mmap, .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE }; static const int io_hold_cnt = 16; static int vn_io_fault_enable = 1; SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW, &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance"); static int vn_io_fault_prefault = 0; SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RW, &vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting"); static u_long vn_io_faults_cnt; SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD, &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers"); /* * Returns true if vn_io_fault mode of handling the i/o request should * be used. */ static bool do_vn_io_fault(struct vnode *vp, struct uio *uio) { struct mount *mp; return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG && (mp = vp->v_mount) != NULL && (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable); } /* * Structure used to pass arguments to vn_io_fault1(), to do either * file- or vnode-based I/O calls. */ struct vn_io_fault_args { enum { VN_IO_FAULT_FOP, VN_IO_FAULT_VOP } kind; struct ucred *cred; int flags; union { struct fop_args_tag { struct file *fp; fo_rdwr_t *doio; } fop_args; struct vop_args_tag { struct vnode *vp; } vop_args; } args; }; static int vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args, struct thread *td); int vn_open(ndp, flagp, cmode, fp) struct nameidata *ndp; int *flagp, cmode; struct file *fp; { struct thread *td = ndp->ni_cnd.cn_thread; return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp)); } /* * Common code for vnode open operations via a name lookup. * Lookup the vnode and invoke VOP_CREATE if needed. * Check permissions, and call the VOP_OPEN or VOP_CREATE routine. * * Note that this does NOT free nameidata for the successful case, * due to the NDINIT being done elsewhere. 
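 * A minimal sketch of the usual in-kernel calling sequence
 * (hypothetical caller, error handling elided):
 *
 *     struct nameidata nd;
 *     int flags, error;
 *
 *     NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/boot/kernel/foo", td);
 *     flags = FREAD;
 *     error = vn_open(&nd, &flags, 0, NULL);
 *     if (error == 0) {
 *             NDFREE(&nd, NDF_ONLY_PNBUF);
 *             ... use the locked nd.ni_vp ...
 *             VOP_UNLOCK(nd.ni_vp, 0);
 *             vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
 *     }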
*/ int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags, struct ucred *cred, struct file *fp) { struct vnode *vp; struct mount *mp; struct thread *td = ndp->ni_cnd.cn_thread; struct vattr vat; struct vattr *vap = &vat; int fmode, error; restart: fmode = *flagp; if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT | O_EXCL | O_DIRECTORY)) return (EINVAL); else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) { ndp->ni_cnd.cn_nameiop = CREATE; /* * Set NOCACHE to avoid flushing the cache when * rolling in many files at once. */ ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF | NOCACHE; if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0) ndp->ni_cnd.cn_flags |= FOLLOW; if (!(vn_open_flags & VN_OPEN_NOAUDIT)) ndp->ni_cnd.cn_flags |= AUDITVNODE1; if (vn_open_flags & VN_OPEN_NOCAPCHECK) ndp->ni_cnd.cn_flags |= NOCAPCHECK; bwillwrite(); if ((error = namei(ndp)) != 0) return (error); if (ndp->ni_vp == NULL) { VATTR_NULL(vap); vap->va_type = VREG; vap->va_mode = cmode; if (fmode & O_EXCL) vap->va_vaflags |= VA_EXCLUSIVE; if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) { NDFREE(ndp, NDF_ONLY_PNBUF); vput(ndp->ni_dvp); if ((error = vn_start_write(NULL, &mp, V_XSLEEP | PCATCH)) != 0) return (error); goto restart; } if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0) ndp->ni_cnd.cn_flags |= MAKEENTRY; #ifdef MAC error = mac_vnode_check_create(cred, ndp->ni_dvp, &ndp->ni_cnd, vap); if (error == 0) #endif error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp, &ndp->ni_cnd, vap); vput(ndp->ni_dvp); vn_finished_write(mp); if (error) { NDFREE(ndp, NDF_ONLY_PNBUF); return (error); } fmode &= ~O_TRUNC; vp = ndp->ni_vp; } else { if (ndp->ni_dvp == ndp->ni_vp) vrele(ndp->ni_dvp); else vput(ndp->ni_dvp); ndp->ni_dvp = NULL; vp = ndp->ni_vp; if (fmode & O_EXCL) { error = EEXIST; goto bad; } fmode &= ~O_CREAT; } } else { ndp->ni_cnd.cn_nameiop = LOOKUP; ndp->ni_cnd.cn_flags = ISOPEN | ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF; if (!(fmode & FWRITE)) ndp->ni_cnd.cn_flags |= LOCKSHARED; if (!(vn_open_flags & VN_OPEN_NOAUDIT)) ndp->ni_cnd.cn_flags |= AUDITVNODE1; if (vn_open_flags & VN_OPEN_NOCAPCHECK) ndp->ni_cnd.cn_flags |= NOCAPCHECK; if ((error = namei(ndp)) != 0) return (error); vp = ndp->ni_vp; } error = vn_open_vnode(vp, fmode, cred, td, fp); if (error) goto bad; *flagp = fmode; return (0); bad: NDFREE(ndp, NDF_ONLY_PNBUF); vput(vp); *flagp = fmode; ndp->ni_vp = NULL; return (error); } /* * Common code for vnode open operations once a vnode is located. * Check permissions, and call the VOP_OPEN routine. 
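 * The open mode maps onto the access checks below roughly as follows:
 * FREAD requests VREAD, FWRITE or O_TRUNC requests VWRITE (and is
 * rejected outright with EISDIR on directories), FEXEC requests VEXEC,
 * and O_APPEND combined with FWRITE additionally requests VAPPEND.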
*/ int vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred, struct thread *td, struct file *fp) { accmode_t accmode; struct flock lf; int error, lock_flags, type; if (vp->v_type == VLNK) return (EMLINK); if (vp->v_type == VSOCK) return (EOPNOTSUPP); if (vp->v_type != VDIR && fmode & O_DIRECTORY) return (ENOTDIR); accmode = 0; if (fmode & (FWRITE | O_TRUNC)) { if (vp->v_type == VDIR) return (EISDIR); accmode |= VWRITE; } if (fmode & FREAD) accmode |= VREAD; if (fmode & FEXEC) accmode |= VEXEC; if ((fmode & O_APPEND) && (fmode & FWRITE)) accmode |= VAPPEND; #ifdef MAC if (fmode & O_CREAT) accmode |= VCREAT; if (fmode & O_VERIFY) accmode |= VVERIFY; error = mac_vnode_check_open(cred, vp, accmode); if (error) return (error); accmode &= ~(VCREAT | VVERIFY); #endif if ((fmode & O_CREAT) == 0) { if (accmode & VWRITE) { error = vn_writechk(vp); if (error) return (error); } if (accmode) { error = VOP_ACCESS(vp, accmode, cred, td); if (error) return (error); } } if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) vn_lock(vp, LK_UPGRADE | LK_RETRY); if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0) return (error); if (fmode & (O_EXLOCK | O_SHLOCK)) { KASSERT(fp != NULL, ("open with flock requires fp")); lock_flags = VOP_ISLOCKED(vp); VOP_UNLOCK(vp, 0); lf.l_whence = SEEK_SET; lf.l_start = 0; lf.l_len = 0; if (fmode & O_EXLOCK) lf.l_type = F_WRLCK; else lf.l_type = F_RDLCK; type = F_FLOCK; if ((fmode & FNONBLOCK) == 0) type |= F_WAIT; error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type); if (error == 0) fp->f_flag |= FHASLOCK; vn_lock(vp, lock_flags | LK_RETRY); if (error == 0 && vp->v_iflag & VI_DOOMED) error = ENOENT; /* * Another thread might have used this vnode as an * executable while the vnode lock was dropped. * Ensure the vnode is still able to be opened for * writing after the lock has been obtained. */ if (error == 0 && accmode & VWRITE) error = vn_writechk(vp); if (error != 0) { fp->f_flag |= FOPENFAILED; fp->f_vnode = vp; if (fp->f_ops == &badfileops) { fp->f_type = DTYPE_VNODE; fp->f_ops = &vnops; } vref(vp); } } if (error == 0 && fmode & FWRITE) { VOP_ADD_WRITECOUNT(vp, 1); CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d", __func__, vp, vp->v_writecount); } ASSERT_VOP_LOCKED(vp, "vn_open_vnode"); return (error); } /* * Check for write permissions on the specified vnode. * Prototype text segments cannot be written. */ int vn_writechk(vp) register struct vnode *vp; { ASSERT_VOP_LOCKED(vp, "vn_writechk"); /* * If there's shared text associated with * the vnode, try to free it up once. If * we fail, we can't allow writing. */ if (VOP_IS_TEXT(vp)) return (ETXTBSY); return (0); } /* * Vnode close call */ int vn_close(vp, flags, file_cred, td) register struct vnode *vp; int flags; struct ucred *file_cred; struct thread *td; { struct mount *mp; int error, lock_flags; if (vp->v_type != VFIFO && (flags & FWRITE) == 0 && MNT_EXTENDED_SHARED(vp->v_mount)) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; vn_start_write(vp, &mp, V_WAIT); vn_lock(vp, lock_flags | LK_RETRY); AUDIT_ARG_VNODE1(vp); if ((flags & (FWRITE | FOPENFAILED)) == FWRITE) { VNASSERT(vp->v_writecount > 0, vp, ("vn_close: negative writecount")); VOP_ADD_WRITECOUNT(vp, -1); CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d", __func__, vp, vp->v_writecount); } error = VOP_CLOSE(vp, flags, file_cred, td); vput(vp); vn_finished_write(mp); return (error); } /* * Heuristic to detect sequential operation. 
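 * As a worked example: a stream of sequential 64KB requests advances
 * f_seqcount by howmany(65536, 16384) = 4 per call until it saturates
 * at IO_SEQMAX; the value handed back to the caller is f_seqcount
 * shifted left by IO_SEQSHIFT, ready to be OR'ed into the ioflag
 * argument of VOP_READ() or VOP_WRITE().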
*/ static int sequential_heuristic(struct uio *uio, struct file *fp) { ASSERT_VOP_LOCKED(fp->f_vnode, __func__); if (fp->f_flag & FRDAHEAD) return (fp->f_seqcount << IO_SEQSHIFT); /* * Offset 0 is handled specially. open() sets f_seqcount to 1 so * that the first I/O is normally considered to be slightly * sequential. Seeking to offset 0 doesn't change sequentiality * unless previous seeks have reduced f_seqcount to 0, in which * case offset 0 is not special. */ if ((uio->uio_offset == 0 && fp->f_seqcount > 0) || uio->uio_offset == fp->f_nextoff) { /* * f_seqcount is in units of fixed-size blocks so that it * depends mainly on the amount of sequential I/O and not * much on the number of sequential I/O's. The fixed size * of 16384 is hard-coded here since it is (not quite) just * a magic size that works well here. This size is more * closely related to the best I/O size for real disks than * to any block size used by software. */ fp->f_seqcount += howmany(uio->uio_resid, 16384); if (fp->f_seqcount > IO_SEQMAX) fp->f_seqcount = IO_SEQMAX; return (fp->f_seqcount << IO_SEQSHIFT); } /* Not sequential. Quickly draw-down sequentiality. */ if (fp->f_seqcount > 1) fp->f_seqcount = 1; else fp->f_seqcount = 0; return (0); } /* * Package up an I/O request on a vnode into a uio and do it. */ int vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred, struct ucred *file_cred, ssize_t *aresid, struct thread *td) { struct uio auio; struct iovec aiov; struct mount *mp; struct ucred *cred; void *rl_cookie; struct vn_io_fault_args args; int error, lock_flags; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; aiov.iov_base = base; aiov.iov_len = len; auio.uio_resid = len; auio.uio_offset = offset; auio.uio_segflg = segflg; auio.uio_rw = rw; auio.uio_td = td; error = 0; if ((ioflg & IO_NODELOCKED) == 0) { if ((ioflg & IO_RANGELOCKED) == 0) { if (rw == UIO_READ) { rl_cookie = vn_rangelock_rlock(vp, offset, offset + len); } else { rl_cookie = vn_rangelock_wlock(vp, offset, offset + len); } } else rl_cookie = NULL; mp = NULL; if (rw == UIO_WRITE) { if (vp->v_type != VCHR && (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) goto out; if (MNT_SHARED_WRITES(mp) || ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; } else lock_flags = LK_SHARED; vn_lock(vp, lock_flags | LK_RETRY); } else rl_cookie = NULL; ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); #ifdef MAC if ((ioflg & IO_NOMACCHECK) == 0) { if (rw == UIO_READ) error = mac_vnode_check_read(active_cred, file_cred, vp); else error = mac_vnode_check_write(active_cred, file_cred, vp); } #endif if (error == 0) { if (file_cred != NULL) cred = file_cred; else cred = active_cred; if (do_vn_io_fault(vp, &auio)) { args.kind = VN_IO_FAULT_VOP; args.cred = cred; args.flags = ioflg; args.args.vop_args.vp = vp; error = vn_io_fault1(vp, &auio, &args, td); } else if (rw == UIO_READ) { error = VOP_READ(vp, &auio, ioflg, cred); } else /* if (rw == UIO_WRITE) */ { error = VOP_WRITE(vp, &auio, ioflg, cred); } } if (aresid) *aresid = auio.uio_resid; else if (auio.uio_resid && error == 0) error = EIO; if ((ioflg & IO_NODELOCKED) == 0) { VOP_UNLOCK(vp, 0); if (mp != NULL) vn_finished_write(mp); } out: if (rl_cookie != NULL) vn_rangelock_unlock(vp, rl_cookie); return (error); } /* * Package up an I/O request on a vnode into a uio and do it. 
The I/O * request is split up into smaller chunks and we try to avoid saturating * the buffer cache while potentially holding a vnode locked, so we * call bwillwrite() before calling vn_rdwr(). We also call kern_yield() * to give other processes a chance to lock the vnode (either other processes * core'ing the same binary, or unrelated processes scanning the directory). */ int vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred, aresid, td) enum uio_rw rw; struct vnode *vp; void *base; size_t len; off_t offset; enum uio_seg segflg; int ioflg; struct ucred *active_cred; struct ucred *file_cred; size_t *aresid; struct thread *td; { int error = 0; ssize_t iaresid; do { int chunk; /* * Force `offset' to a multiple of MAXBSIZE except possibly * for the first chunk, so that filesystems only need to * write full blocks except possibly for the first and last * chunks. */ chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE; if (chunk > len) chunk = len; if (rw != UIO_READ && vp->v_type == VREG) bwillwrite(); iaresid = 0; error = vn_rdwr(rw, vp, base, chunk, offset, segflg, ioflg, active_cred, file_cred, &iaresid, td); len -= chunk; /* aresid calc already includes length */ if (error) break; offset += chunk; base = (char *)base + chunk; kern_yield(PRI_USER); } while (len); if (aresid) *aresid = len + iaresid; return (error); } off_t foffset_lock(struct file *fp, int flags) { struct mtx *mtxp; off_t res; KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed")); #if OFF_MAX <= LONG_MAX /* * Caller only wants the current f_offset value. Assume that * reads of long and shorter integer types are atomic. */ if ((flags & FOF_NOLOCK) != 0) return (fp->f_offset); #endif /* * According to McKusick the vn lock was protecting f_offset here. * It is now protected by the FOFFSET_LOCKED flag.
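 * The intended pairing is (sketch, error handling elided):
 *
 *     off_t off;
 *
 *     off = foffset_lock(fp, 0);
 *     ... perform the i/o, advancing off ...
 *     foffset_unlock(fp, off, 0);
 *
 * foffset_lock() sleeps while another thread holds FOFFSET_LOCKED;
 * foffset_unlock() publishes the new offset and wakes any waiters.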
*/ mtxp = mtx_pool_find(mtxpool_sleep, fp); mtx_lock(mtxp); if ((flags & FOF_NOLOCK) == 0) { while (fp->f_vnread_flags & FOFFSET_LOCKED) { fp->f_vnread_flags |= FOFFSET_LOCK_WAITING; msleep(&fp->f_vnread_flags, mtxp, PUSER -1, "vofflock", 0); } fp->f_vnread_flags |= FOFFSET_LOCKED; } res = fp->f_offset; mtx_unlock(mtxp); return (res); } void foffset_unlock(struct file *fp, off_t val, int flags) { struct mtx *mtxp; KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed")); #if OFF_MAX <= LONG_MAX if ((flags & FOF_NOLOCK) != 0) { if ((flags & FOF_NOUPDATE) == 0) fp->f_offset = val; if ((flags & FOF_NEXTOFF) != 0) fp->f_nextoff = val; return; } #endif mtxp = mtx_pool_find(mtxpool_sleep, fp); mtx_lock(mtxp); if ((flags & FOF_NOUPDATE) == 0) fp->f_offset = val; if ((flags & FOF_NEXTOFF) != 0) fp->f_nextoff = val; if ((flags & FOF_NOLOCK) == 0) { KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0, ("Lost FOFFSET_LOCKED")); if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING) wakeup(&fp->f_vnread_flags); fp->f_vnread_flags = 0; } mtx_unlock(mtxp); } void foffset_lock_uio(struct file *fp, struct uio *uio, int flags) { if ((flags & FOF_OFFSET) == 0) uio->uio_offset = foffset_lock(fp, flags); } void foffset_unlock_uio(struct file *fp, struct uio *uio, int flags) { if ((flags & FOF_OFFSET) == 0) foffset_unlock(fp, uio->uio_offset, flags); } static int get_advice(struct file *fp, struct uio *uio) { struct mtx *mtxp; int ret; ret = POSIX_FADV_NORMAL; if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG) return (ret); mtxp = mtx_pool_find(mtxpool_sleep, fp); mtx_lock(mtxp); if (fp->f_advice != NULL && uio->uio_offset >= fp->f_advice->fa_start && uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end) ret = fp->f_advice->fa_advice; mtx_unlock(mtxp); return (ret); } /* * File table vnode read routine. */ static int vn_read(fp, uio, active_cred, flags, td) struct file *fp; struct uio *uio; struct ucred *active_cred; int flags; struct thread *td; { struct vnode *vp; off_t orig_offset; int error, ioflag; int advice; KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td)); KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET")); vp = fp->f_vnode; ioflag = 0; if (fp->f_flag & FNONBLOCK) ioflag |= IO_NDELAY; if (fp->f_flag & O_DIRECT) ioflag |= IO_DIRECT; advice = get_advice(fp, uio); vn_lock(vp, LK_SHARED | LK_RETRY); switch (advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_SEQUENTIAL: case POSIX_FADV_NOREUSE: ioflag |= sequential_heuristic(uio, fp); break; case POSIX_FADV_RANDOM: /* Disable read-ahead for random I/O. */ break; } orig_offset = uio->uio_offset; #ifdef MAC error = mac_vnode_check_read(active_cred, fp->f_cred, vp); if (error == 0) #endif error = VOP_READ(vp, uio, ioflag, fp->f_cred); fp->f_nextoff = uio->uio_offset; VOP_UNLOCK(vp, 0); if (error == 0 && advice == POSIX_FADV_NOREUSE && orig_offset != uio->uio_offset) /* * Use POSIX_FADV_DONTNEED to flush pages and buffers * for the backing file after a POSIX_FADV_NOREUSE * read(2). */ error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1, POSIX_FADV_DONTNEED); return (error); } /* * File table vnode write routine. 
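 * The file flags translate into VOP_WRITE() ioflags as follows:
 * O_APPEND on a regular file adds IO_APPEND, FNONBLOCK adds IO_NDELAY,
 * O_DIRECT adds IO_DIRECT, and O_FSYNC (or an MNT_SYNCHRONOUS mount)
 * adds IO_SYNC; IO_UNIT is always set.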
*/ static int vn_write(fp, uio, active_cred, flags, td) struct file *fp; struct uio *uio; struct ucred *active_cred; int flags; struct thread *td; { struct vnode *vp; struct mount *mp; off_t orig_offset; int error, ioflag, lock_flags; int advice; KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td)); KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET")); vp = fp->f_vnode; if (vp->v_type == VREG) bwillwrite(); ioflag = IO_UNIT; if (vp->v_type == VREG && (fp->f_flag & O_APPEND)) ioflag |= IO_APPEND; if (fp->f_flag & FNONBLOCK) ioflag |= IO_NDELAY; if (fp->f_flag & O_DIRECT) ioflag |= IO_DIRECT; if ((fp->f_flag & O_FSYNC) || (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) ioflag |= IO_SYNC; mp = NULL; if (vp->v_type != VCHR && (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) goto unlock; advice = get_advice(fp, uio); if (MNT_SHARED_WRITES(mp) || (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) { lock_flags = LK_SHARED; } else { lock_flags = LK_EXCLUSIVE; } vn_lock(vp, lock_flags | LK_RETRY); switch (advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_SEQUENTIAL: case POSIX_FADV_NOREUSE: ioflag |= sequential_heuristic(uio, fp); break; case POSIX_FADV_RANDOM: /* XXX: Is this correct? */ break; } orig_offset = uio->uio_offset; #ifdef MAC error = mac_vnode_check_write(active_cred, fp->f_cred, vp); if (error == 0) #endif error = VOP_WRITE(vp, uio, ioflag, fp->f_cred); fp->f_nextoff = uio->uio_offset; VOP_UNLOCK(vp, 0); if (vp->v_type != VCHR) vn_finished_write(mp); if (error == 0 && advice == POSIX_FADV_NOREUSE && orig_offset != uio->uio_offset) /* * Use POSIX_FADV_DONTNEED to flush pages and buffers * for the backing file after a POSIX_FADV_NOREUSE * write(2). */ error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1, POSIX_FADV_DONTNEED); unlock: return (error); } /* * The vn_io_fault() is a wrapper around vn_read() and vn_write() to * prevent the following deadlock: * * Assume that the thread A reads from the vnode vp1 into userspace * buffer buf1 backed by the pages of vnode vp2. If a page in buf1 is * currently not resident, then system ends up with the call chain * vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] -> * vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2) * which establishes lock order vp1->vn_lock, then vp2->vn_lock. * If, at the same time, thread B reads from vnode vp2 into buffer buf2 * backed by the pages of vnode vp1, and some page in buf2 is not * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock. * * To prevent the lock order reversal and deadlock, vn_io_fault() does * not allow page faults to happen during VOP_READ() or VOP_WRITE(). * Instead, it first tries to do the whole range i/o with pagefaults * disabled. If all pages in the i/o buffer are resident and mapped, * VOP will succeed (ignoring the genuine filesystem errors). * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do * i/o in chunks, with all pages in the chunk prefaulted and held * using vm_fault_quick_hold_pages(). * * Filesystems using this deadlock avoidance scheme should use the * array of the held pages from uio, saved in the curthread->td_ma, * instead of doing uiomove(). A helper function * vn_io_fault_uiomove() converts uiomove request into * uiomove_fromphys() over td_ma array. * * Since vnode locks do not cover the whole i/o anymore, rangelocks * make the current i/o request atomic with respect to other i/os and * truncations. */ /* * Decode vn_io_fault_args and perform the corresponding i/o. 
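 * For VN_IO_FAULT_FOP the saved fo_rdwr_t (vn_read() or vn_write()) is
 * re-invoked with the original file and credentials; for
 * VN_IO_FAULT_VOP the request goes straight to VOP_READ() or
 * VOP_WRITE() on the saved vnode, selected by uio_rw.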
*/ static int vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio, struct thread *td) { switch (args->kind) { case VN_IO_FAULT_FOP: return ((args->args.fop_args.doio)(args->args.fop_args.fp, uio, args->cred, args->flags, td)); case VN_IO_FAULT_VOP: if (uio->uio_rw == UIO_READ) { return (VOP_READ(args->args.vop_args.vp, uio, args->flags, args->cred)); } else if (uio->uio_rw == UIO_WRITE) { return (VOP_WRITE(args->args.vop_args.vp, uio, args->flags, args->cred)); } break; } panic("vn_io_fault_doio: unknown kind of io %d %d", args->kind, uio->uio_rw); } static int vn_io_fault_touch(char *base, const struct uio *uio) { int r; r = fubyte(base); if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1)) return (EFAULT); return (0); } static int vn_io_fault_prefault_user(const struct uio *uio) { char *base; const struct iovec *iov; size_t len; ssize_t resid; int error, i; KASSERT(uio->uio_segflg == UIO_USERSPACE, ("vn_io_fault_prefault userspace")); error = i = 0; iov = uio->uio_iov; resid = uio->uio_resid; base = iov->iov_base; len = iov->iov_len; while (resid > 0) { error = vn_io_fault_touch(base, uio); if (error != 0) break; if (len < PAGE_SIZE) { if (len != 0) { error = vn_io_fault_touch(base + len - 1, uio); if (error != 0) break; resid -= len; } if (++i >= uio->uio_iovcnt) break; iov = uio->uio_iov + i; base = iov->iov_base; len = iov->iov_len; } else { len -= PAGE_SIZE; base += PAGE_SIZE; resid -= PAGE_SIZE; } } return (error); } /* * Common code for vn_io_fault(), agnostic to the kind of i/o request. * Uses vn_io_fault_doio() to make the call to an actual i/o function. * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request * into args and call vn_io_fault1() to handle faults during the user * mode buffer accesses. */ static int vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args, struct thread *td) { vm_page_t ma[io_hold_cnt + 2]; struct uio *uio_clone, short_uio; struct iovec short_iovec[1]; vm_page_t *prev_td_ma; vm_prot_t prot; vm_offset_t addr, end; size_t len, resid; ssize_t adv; int error, cnt, save, saveheld, prev_td_ma_cnt; if (vn_io_fault_prefault) { error = vn_io_fault_prefault_user(uio); if (error != 0) return (error); /* Or ignore ? */ } prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ; /* * The UFS follows IO_UNIT directive and replays back both * uio_offset and uio_resid if an error is encountered during the * operation. But, since the iovec may be already advanced, * uio is still in an inconsistent state. * * Cache a copy of the original uio, which is advanced to the redo * point using UIO_NOCOPY below. 
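 * For example, if the original request had uio_resid == 8192 and the
 * first attempt consumed 4096 bytes before faulting, the
 * uiomove(NULL, ...) call below advances uio_clone past the completed
 * 4096 bytes without copying anything, so the chunked retry resumes
 * exactly at the redo point.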
*/ uio_clone = cloneuio(uio); resid = uio->uio_resid; short_uio.uio_segflg = UIO_USERSPACE; short_uio.uio_rw = uio->uio_rw; short_uio.uio_td = uio->uio_td; save = vm_fault_disable_pagefaults(); error = vn_io_fault_doio(args, uio, td); if (error != EFAULT) goto out; atomic_add_long(&vn_io_faults_cnt, 1); uio_clone->uio_segflg = UIO_NOCOPY; uiomove(NULL, resid - uio->uio_resid, uio_clone); uio_clone->uio_segflg = uio->uio_segflg; saveheld = curthread_pflags_set(TDP_UIOHELD); prev_td_ma = td->td_ma; prev_td_ma_cnt = td->td_ma_cnt; while (uio_clone->uio_resid != 0) { len = uio_clone->uio_iov->iov_len; if (len == 0) { KASSERT(uio_clone->uio_iovcnt >= 1, ("iovcnt underflow")); uio_clone->uio_iov++; uio_clone->uio_iovcnt--; continue; } if (len > io_hold_cnt * PAGE_SIZE) len = io_hold_cnt * PAGE_SIZE; addr = (uintptr_t)uio_clone->uio_iov->iov_base; end = round_page(addr + len); if (end < addr) { error = EFAULT; break; } cnt = atop(end - trunc_page(addr)); /* * A perfectly misaligned address and length could cause * both the start and the end of the chunk to use partial * page. +2 accounts for such a situation. */ cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map, addr, len, prot, ma, io_hold_cnt + 2); if (cnt == -1) { error = EFAULT; break; } short_uio.uio_iov = &short_iovec[0]; short_iovec[0].iov_base = (void *)addr; short_uio.uio_iovcnt = 1; short_uio.uio_resid = short_iovec[0].iov_len = len; short_uio.uio_offset = uio_clone->uio_offset; td->td_ma = ma; td->td_ma_cnt = cnt; error = vn_io_fault_doio(args, &short_uio, td); vm_page_unhold_pages(ma, cnt); adv = len - short_uio.uio_resid; uio_clone->uio_iov->iov_base = (char *)uio_clone->uio_iov->iov_base + adv; uio_clone->uio_iov->iov_len -= adv; uio_clone->uio_resid -= adv; uio_clone->uio_offset += adv; uio->uio_resid -= adv; uio->uio_offset += adv; if (error != 0 || adv == 0) break; } td->td_ma = prev_td_ma; td->td_ma_cnt = prev_td_ma_cnt; curthread_pflags_restore(saveheld); out: vm_fault_enable_pagefaults(save); free(uio_clone, M_IOV); return (error); } static int vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { fo_rdwr_t *doio; struct vnode *vp; void *rl_cookie; struct vn_io_fault_args args; int error; doio = uio->uio_rw == UIO_READ ? vn_read : vn_write; vp = fp->f_vnode; foffset_lock_uio(fp, uio, flags); if (do_vn_io_fault(vp, uio)) { args.kind = VN_IO_FAULT_FOP; args.args.fop_args.fp = fp; args.args.fop_args.doio = doio; args.cred = active_cred; args.flags = flags | FOF_OFFSET; if (uio->uio_rw == UIO_READ) { rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset, uio->uio_offset + uio->uio_resid); } else if ((fp->f_flag & O_APPEND) != 0 || (flags & FOF_OFFSET) == 0) { /* For appenders, punt and lock the whole range. */ rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX); } else { rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset, uio->uio_offset + uio->uio_resid); } error = vn_io_fault1(vp, uio, &args, td); vn_rangelock_unlock(vp, rl_cookie); } else { error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td); } foffset_unlock_uio(fp, uio, flags); return (error); } /* * Helper function to perform the requested uiomove operation using * the held pages for io->uio_iov[0].iov_base buffer instead of * copyin/copyout. 
Access to the pages with uiomove_fromphys() * instead of iov_base prevents page faults that could occur due to * pmap_collect() invalidating the mapping created by * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or * object cleanup revoking the write access from page mappings. * * Filesystems specified MNTK_NO_IOPF shall use vn_io_fault_uiomove() * instead of plain uiomove(). */ int vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio) { struct uio transp_uio; struct iovec transp_iov[1]; struct thread *td; size_t adv; int error, pgadv; td = curthread; if ((td->td_pflags & TDP_UIOHELD) == 0 || uio->uio_segflg != UIO_USERSPACE) return (uiomove(data, xfersize, uio)); KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt)); transp_iov[0].iov_base = data; transp_uio.uio_iov = &transp_iov[0]; transp_uio.uio_iovcnt = 1; if (xfersize > uio->uio_resid) xfersize = uio->uio_resid; transp_uio.uio_resid = transp_iov[0].iov_len = xfersize; transp_uio.uio_offset = 0; transp_uio.uio_segflg = UIO_SYSSPACE; /* * Since transp_iov points to data, and td_ma page array * corresponds to original uio->uio_iov, we need to invert the * direction of the i/o operation as passed to * uiomove_fromphys(). */ switch (uio->uio_rw) { case UIO_WRITE: transp_uio.uio_rw = UIO_READ; break; case UIO_READ: transp_uio.uio_rw = UIO_WRITE; break; } transp_uio.uio_td = uio->uio_td; error = uiomove_fromphys(td->td_ma, ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK, xfersize, &transp_uio); adv = xfersize - transp_uio.uio_resid; pgadv = (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) - (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT); td->td_ma += pgadv; KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt, pgadv)); td->td_ma_cnt -= pgadv; uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv; uio->uio_iov->iov_len -= adv; uio->uio_resid -= adv; uio->uio_offset += adv; return (error); } int vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize, struct uio *uio) { struct thread *td; vm_offset_t iov_base; int cnt, pgadv; td = curthread; if ((td->td_pflags & TDP_UIOHELD) == 0 || uio->uio_segflg != UIO_USERSPACE) return (uiomove_fromphys(ma, offset, xfersize, uio)); KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt)); cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize; iov_base = (vm_offset_t)uio->uio_iov->iov_base; switch (uio->uio_rw) { case UIO_WRITE: pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma, offset, cnt); break; case UIO_READ: pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK, cnt); break; } pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT); td->td_ma += pgadv; KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt, pgadv)); td->td_ma_cnt -= pgadv; uio->uio_iov->iov_base = (char *)(iov_base + cnt); uio->uio_iov->iov_len -= cnt; uio->uio_resid -= cnt; uio->uio_offset += cnt; return (0); } /* * File table truncate routine. */ static int vn_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td) { struct vattr vattr; struct mount *mp; struct vnode *vp; void *rl_cookie; int error; vp = fp->f_vnode; /* * Lock the whole range for truncation. Otherwise split i/o * might happen partly before and partly after the truncation. 
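 * For instance, without the whole-range lock a concurrent write
 * straddling the new end of file could commit its first half against
 * the old size and its second half against the truncated file; locking
 * [0, OFF_MAX] below serializes such i/o against the truncation.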
*/ rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX); error = vn_start_write(vp, &mp, V_WAIT | PCATCH); if (error) goto out1; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); AUDIT_ARG_VNODE1(vp); if (vp->v_type == VDIR) { error = EISDIR; goto out; } #ifdef MAC error = mac_vnode_check_write(active_cred, fp->f_cred, vp); if (error) goto out; #endif error = vn_writechk(vp); if (error == 0) { VATTR_NULL(&vattr); vattr.va_size = length; if ((fp->f_flag & O_FSYNC) != 0) vattr.va_vaflags |= VA_SYNC; error = VOP_SETATTR(vp, &vattr, fp->f_cred); } out: VOP_UNLOCK(vp, 0); vn_finished_write(mp); out1: vn_rangelock_unlock(vp, rl_cookie); return (error); } /* * File table vnode stat routine. */ static int vn_statfile(fp, sb, active_cred, td) struct file *fp; struct stat *sb; struct ucred *active_cred; struct thread *td; { struct vnode *vp = fp->f_vnode; int error; vn_lock(vp, LK_SHARED | LK_RETRY); error = vn_stat(vp, sb, active_cred, fp->f_cred, td); VOP_UNLOCK(vp, 0); return (error); } /* * Stat a vnode; implementation for the stat syscall */ int vn_stat(vp, sb, active_cred, file_cred, td) struct vnode *vp; register struct stat *sb; struct ucred *active_cred; struct ucred *file_cred; struct thread *td; { struct vattr vattr; register struct vattr *vap; int error; u_short mode; AUDIT_ARG_VNODE1(vp); #ifdef MAC error = mac_vnode_check_stat(active_cred, file_cred, vp); if (error) return (error); #endif vap = &vattr; /* * Initialize defaults for new and unusual fields, so that file * systems which don't support these fields don't need to know * about them. */ vap->va_birthtime.tv_sec = -1; vap->va_birthtime.tv_nsec = 0; vap->va_fsid = VNOVAL; vap->va_rdev = NODEV; error = VOP_GETATTR(vp, vap, active_cred); if (error) return (error); /* * Zero the spare stat fields */ bzero(sb, sizeof *sb); /* * Copy from vattr table */ if (vap->va_fsid != VNOVAL) sb->st_dev = vap->va_fsid; else sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0]; sb->st_ino = vap->va_fileid; mode = vap->va_mode; switch (vap->va_type) { case VREG: mode |= S_IFREG; break; case VDIR: mode |= S_IFDIR; break; case VBLK: mode |= S_IFBLK; break; case VCHR: mode |= S_IFCHR; break; case VLNK: mode |= S_IFLNK; break; case VSOCK: mode |= S_IFSOCK; break; case VFIFO: mode |= S_IFIFO; break; default: return (EBADF); } sb->st_mode = mode; sb->st_nlink = vap->va_nlink; sb->st_uid = vap->va_uid; sb->st_gid = vap->va_gid; sb->st_rdev = vap->va_rdev; if (vap->va_size > OFF_MAX) return (EOVERFLOW); sb->st_size = vap->va_size; sb->st_atim = vap->va_atime; sb->st_mtim = vap->va_mtime; sb->st_ctim = vap->va_ctime; sb->st_birthtim = vap->va_birthtime; /* * According to www.opengroup.org, the meaning of st_blksize is * "a filesystem-specific preferred I/O block size for this * object. In some filesystem types, this may vary from file * to file". * Use a minimum/default of PAGE_SIZE (e.g. for VCHR). */ sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize); sb->st_flags = vap->va_flags; if (priv_check(td, PRIV_VFS_GENERATION)) sb->st_gen = 0; else sb->st_gen = vap->va_gen; sb->st_blocks = vap->va_bytes / S_BLKSIZE; return (0); } /* * File table vnode ioctl routine.
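 * For VREG and VDIR vnodes, only FIONREAD (the bytes remaining between
 * the current offset and the size returned by VOP_GETATTR()), FIONBIO
 * and FIOASYNC are handled here, the latter two as no-ops; everything
 * else, including all VCHR requests, is forwarded to VOP_IOCTL(),
 * while other vnode types get ENOTTY.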
*/ static int vn_ioctl(fp, com, data, active_cred, td) struct file *fp; u_long com; void *data; struct ucred *active_cred; struct thread *td; { struct vattr vattr; struct vnode *vp; int error; vp = fp->f_vnode; switch (vp->v_type) { case VDIR: case VREG: switch (com) { case FIONREAD: vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_GETATTR(vp, &vattr, active_cred); VOP_UNLOCK(vp, 0); if (error == 0) *(int *)data = vattr.va_size - fp->f_offset; return (error); case FIONBIO: case FIOASYNC: return (0); default: return (VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td)); } break; case VCHR: return (VOP_IOCTL(vp, com, data, fp->f_flag, active_cred, td)); default: return (ENOTTY); } } /* * File table vnode poll routine. */ static int vn_poll(fp, events, active_cred, td) struct file *fp; int events; struct ucred *active_cred; struct thread *td; { struct vnode *vp; int error; vp = fp->f_vnode; #ifdef MAC vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); AUDIT_ARG_VNODE1(vp); error = mac_vnode_check_poll(active_cred, fp->f_cred, vp); VOP_UNLOCK(vp, 0); if (!error) #endif error = VOP_POLL(vp, events, fp->f_cred, td); return (error); } /* * Acquire the requested lock and then check for validity. LK_RETRY * permits vn_lock to return doomed vnodes. */ int _vn_lock(struct vnode *vp, int flags, char *file, int line) { int error; VNASSERT((flags & LK_TYPE_MASK) != 0, vp, - ("vn_lock called with no locktype.")); -#ifdef DEBUG_VFS_LOCKS - KASSERT(vp->v_holdcnt != 0, - ("vn_lock %p: zero hold count", vp)); -#endif + ("vn_lock: no locktype")); + VNASSERT(vp->v_holdcnt != 0, vp, ("vn_lock: zero hold count")); retry: error = VOP_LOCK1(vp, flags, file, line); flags &= ~LK_INTERLOCK; /* Interlock is always dropped. */ KASSERT((flags & LK_RETRY) == 0 || error == 0, - ("LK_RETRY set with incompatible flags (0x%x) or " - " an error occurred (%d)", flags, error)); + ("vn_lock: error %d incompatible with flags %#x", error, flags)); if ((flags & LK_RETRY) == 0) { - if (error == 0 && vp->v_iflag & VI_DOOMED) { + if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) { VOP_UNLOCK(vp, 0); error = ENOENT; } - } else { - if (error != 0) - goto retry; - } + } else if (error != 0) + goto retry; return (error); } /* * File table vnode close routine. */ static int -vn_closefile(fp, td) - struct file *fp; - struct thread *td; +vn_closefile(struct file *fp, struct thread *td) { struct vnode *vp; struct flock lf; int error; vp = fp->f_vnode; fp->f_ops = &badfileops; - if (__predict_false(fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) + if (__predict_false((fp->f_flag & FHASLOCK) != 0) && + fp->f_type == DTYPE_VNODE) vrefact(vp); error = vn_close(vp, fp->f_flag, fp->f_cred, td); - if (__predict_false(fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) { + if (__predict_false((fp->f_flag & FHASLOCK) != 0) && + fp->f_type == DTYPE_VNODE) { lf.l_whence = SEEK_SET; lf.l_start = 0; lf.l_len = 0; lf.l_type = F_UNLCK; (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK); vrele(vp); } return (error); } static bool vn_suspendable(struct mount *mp) { return (mp->mnt_op->vfs_susp_clean != NULL); } /* * Preparing to start a filesystem write operation. If the operation is * permitted, then we bump the count of operations in progress and * proceed. If a suspend request is in progress, we wait until the * suspension is over, and then proceed. */ static int vn_start_write_locked(struct mount *mp, int flags) { int error, mflags; mtx_assert(MNT_MTX(mp), MA_OWNED); error = 0; /* * Check on status of suspension. 
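 * For reference, the usual caller-side bracket around a write-type
 * operation looks like (sketch, error handling elided):
 *
 *     struct mount *mp;
 *
 *     error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
 *     vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *     ... modify the vnode ...
 *     VOP_UNLOCK(vp, 0);
 *     vn_finished_write(mp);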
*/ if ((curthread->td_pflags & TDP_IGNSUSP) == 0 || mp->mnt_susp_owner != curthread) { mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0) | (PUSER - 1); while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { if (flags & V_NOWAIT) { error = EWOULDBLOCK; goto unlock; } error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags, "suspfs", 0); if (error) goto unlock; } } if (flags & V_XSLEEP) goto unlock; mp->mnt_writeopcount++; unlock: if (error != 0 || (flags & V_XSLEEP) != 0) MNT_REL(mp); MNT_IUNLOCK(mp); return (error); } int vn_start_write(struct vnode *vp, struct mount **mpp, int flags) { struct mount *mp; int error; KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL), ("V_MNTREF requires mp")); error = 0; /* * If a vnode is provided, get and return the mount point * to which it will write. */ if (vp != NULL) { if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) { *mpp = NULL; if (error != EOPNOTSUPP) return (error); return (0); } } if ((mp = *mpp) == NULL) return (0); if (!vn_suspendable(mp)) { if (vp != NULL || (flags & V_MNTREF) != 0) vfs_rel(mp); return (0); } /* * VOP_GETWRITEMOUNT() returns with the mp refcount held through * a vfs_ref(). * When a vnode is not provided, we need to acquire a * refcount on the provided mount point too, in order to * emulate a vfs_ref(). */ MNT_ILOCK(mp); if (vp == NULL && (flags & V_MNTREF) == 0) MNT_REF(mp); return (vn_start_write_locked(mp, flags)); } /* * Secondary suspension. Used by operations such as vop_inactive * routines that are needed by the higher level functions. These * are allowed to proceed until all the higher level functions have * completed (indicated by mnt_writeopcount dropping to zero). At that * time, these operations are halted until the suspension is over. */ int vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags) { struct mount *mp; int error; KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL), ("V_MNTREF requires mp")); retry: if (vp != NULL) { if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) { *mpp = NULL; if (error != EOPNOTSUPP) return (error); return (0); } } /* * If we are not suspended or have not yet reached suspended * mode, then let the operation proceed. */ if ((mp = *mpp) == NULL) return (0); if (!vn_suspendable(mp)) { if (vp != NULL || (flags & V_MNTREF) != 0) vfs_rel(mp); return (0); } /* * VOP_GETWRITEMOUNT() returns with the mp refcount held through * a vfs_ref(). * When a vnode is not provided, we need to acquire a * refcount on the provided mount point too, in order to * emulate a vfs_ref(). */ MNT_ILOCK(mp); if (vp == NULL && (flags & V_MNTREF) == 0) MNT_REF(mp); if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) { mp->mnt_secondary_writes++; mp->mnt_secondary_accwrites++; MNT_IUNLOCK(mp); return (0); } if (flags & V_NOWAIT) { MNT_REL(mp); MNT_IUNLOCK(mp); return (EWOULDBLOCK); } /* * Wait for the suspension to finish. */ error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP | ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0), "suspfs", 0); vfs_rel(mp); if (error == 0) goto retry; return (error); } /* * Filesystem write operation has completed. If we are suspending and this * operation is the last one, notify the suspender that the suspension is * now in effect.
*/ void vn_finished_write(mp) struct mount *mp; { if (mp == NULL || !vn_suspendable(mp)) return; MNT_ILOCK(mp); MNT_REL(mp); mp->mnt_writeopcount--; if (mp->mnt_writeopcount < 0) panic("vn_finished_write: neg cnt"); if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && mp->mnt_writeopcount <= 0) wakeup(&mp->mnt_writeopcount); MNT_IUNLOCK(mp); } /* * Filesystem secondary write operation has completed. If we are * suspending and this operation is the last one, notify the suspender * that the suspension is now in effect. */ void vn_finished_secondary_write(mp) struct mount *mp; { if (mp == NULL || !vn_suspendable(mp)) return; MNT_ILOCK(mp); MNT_REL(mp); mp->mnt_secondary_writes--; if (mp->mnt_secondary_writes < 0) panic("vn_finished_secondary_write: neg cnt"); if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && mp->mnt_secondary_writes <= 0) wakeup(&mp->mnt_secondary_writes); MNT_IUNLOCK(mp); } /* * Request a filesystem to suspend write operations. */ int vfs_write_suspend(struct mount *mp, int flags) { int error; MPASS(vn_suspendable(mp)); MNT_ILOCK(mp); if (mp->mnt_susp_owner == curthread) { MNT_IUNLOCK(mp); return (EALREADY); } while (mp->mnt_kern_flag & MNTK_SUSPEND) msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0); /* * Unmount holds a write reference on the mount point. If we * own a busy reference and drain for writers, we deadlock with * the reference draining in the unmount path. Callers of * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if a * vfs_busy() reference is owned and the caller is not in the * unmount context. */ if ((flags & VS_SKIP_UNMOUNT) != 0 && (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) { MNT_IUNLOCK(mp); return (EBUSY); } mp->mnt_kern_flag |= MNTK_SUSPEND; mp->mnt_susp_owner = curthread; if (mp->mnt_writeopcount > 0) (void) msleep(&mp->mnt_writeopcount, MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0); else MNT_IUNLOCK(mp); if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0) vfs_write_resume(mp, 0); return (error); } /* * Request a filesystem to resume write operations. */ void vfs_write_resume(struct mount *mp, int flags) { MPASS(vn_suspendable(mp)); MNT_ILOCK(mp); if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner")); mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 | MNTK_SUSPENDED); mp->mnt_susp_owner = NULL; wakeup(&mp->mnt_writeopcount); wakeup(&mp->mnt_flag); curthread->td_pflags &= ~TDP_IGNSUSP; if ((flags & VR_START_WRITE) != 0) { MNT_REF(mp); mp->mnt_writeopcount++; } MNT_IUNLOCK(mp); if ((flags & VR_NO_SUSPCLR) == 0) VFS_SUSP_CLEAN(mp); } else if ((flags & VR_START_WRITE) != 0) { MNT_REF(mp); vn_start_write_locked(mp, 0); } else { MNT_IUNLOCK(mp); } } /* * Helper loop around vfs_write_suspend() for filesystem unmount VFS * methods. */ int vfs_write_suspend_umnt(struct mount *mp) { int error; MPASS(vn_suspendable(mp)); KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0, ("vfs_write_suspend_umnt: recursed")); /* dounmount() already called vn_start_write(). */ for (;;) { vn_finished_write(mp); error = vfs_write_suspend(mp, 0); if (error != 0) { vn_start_write(NULL, &mp, V_WAIT); return (error); } MNT_ILOCK(mp); if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0) break; MNT_IUNLOCK(mp); vn_start_write(NULL, &mp, V_WAIT); } mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); wakeup(&mp->mnt_flag); MNT_IUNLOCK(mp); curthread->td_pflags |= TDP_IGNSUSP; return (0); } /* * Implement kqueues for files by translating them to vnode operations.
*/ static int vn_kqfilter(struct file *fp, struct knote *kn) { return (VOP_KQFILTER(fp->f_vnode, kn)); } /* * Simplified in-kernel wrapper calls for extended attribute access. * Both calls pass in a NULL credential, authorizing as "kernel" access. * Set IO_NODELOCKED in ioflg if the vnode is already locked. */ int vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace, const char *attrname, int *buflen, char *buf, struct thread *td) { struct uio auio; struct iovec iov; int error; iov.iov_len = *buflen; iov.iov_base = buf; auio.uio_iov = &iov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_offset = 0; auio.uio_resid = *buflen; if ((ioflg & IO_NODELOCKED) == 0) vn_lock(vp, LK_SHARED | LK_RETRY); ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); /* authorize attribute retrieval as kernel */ error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL, td); if ((ioflg & IO_NODELOCKED) == 0) VOP_UNLOCK(vp, 0); if (error == 0) { *buflen = *buflen - auio.uio_resid; } return (error); } /* * XXX failure mode if partially written? */ int vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace, const char *attrname, int buflen, char *buf, struct thread *td) { struct uio auio; struct iovec iov; struct mount *mp; int error; iov.iov_len = buflen; iov.iov_base = buf; auio.uio_iov = &iov; auio.uio_iovcnt = 1; auio.uio_rw = UIO_WRITE; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = td; auio.uio_offset = 0; auio.uio_resid = buflen; if ((ioflg & IO_NODELOCKED) == 0) { if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0) return (error); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); /* authorize attribute setting as kernel */ error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td); if ((ioflg & IO_NODELOCKED) == 0) { vn_finished_write(mp); VOP_UNLOCK(vp, 0); } return (error); } int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace, const char *attrname, struct thread *td) { struct mount *mp; int error; if ((ioflg & IO_NODELOCKED) == 0) { if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0) return (error); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); } ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); /* authorize attribute removal as kernel */ error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td); if (error == EOPNOTSUPP) error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td); if ((ioflg & IO_NODELOCKED) == 0) { vn_finished_write(mp); VOP_UNLOCK(vp, 0); } return (error); } static int vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags, struct vnode **rvp) { return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp)); } int vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp) { return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino, lkflags, rvp)); } int vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg, int lkflags, struct vnode **rvp) { struct mount *mp; int ltype, error; ASSERT_VOP_LOCKED(vp, "vn_vget_ino_get"); mp = vp->v_mount; ltype = VOP_ISLOCKED(vp); KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED, ("vn_vget_ino: vp not locked")); error = vfs_busy(mp, MBF_NOWAIT); if (error != 0) { vfs_ref(mp); VOP_UNLOCK(vp, 0); error = vfs_busy(mp, 0); vn_lock(vp, ltype | LK_RETRY); vfs_rel(mp); if (error != 0) return (ENOENT); if (vp->v_iflag & VI_DOOMED) { vfs_unbusy(mp); return (ENOENT); } } VOP_UNLOCK(vp, 0); error = alloc(mp, alloc_arg, lkflags, rvp); vfs_unbusy(mp); if (*rvp 
!= vp) vn_lock(vp, ltype | LK_RETRY); if (vp->v_iflag & VI_DOOMED) { if (error == 0) { if (*rvp == vp) vunref(vp); else vput(*rvp); } error = ENOENT; } return (error); } int vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio, struct thread *td) { if (vp->v_type != VREG || td == NULL) return (0); if ((uoff_t)uio->uio_offset + uio->uio_resid > lim_cur(td, RLIMIT_FSIZE)) { PROC_LOCK(td->td_proc); kern_psignal(td->td_proc, SIGXFSZ); PROC_UNLOCK(td->td_proc); return (EFBIG); } return (0); } int vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td) { struct vnode *vp; vp = fp->f_vnode; #ifdef AUDIT vn_lock(vp, LK_SHARED | LK_RETRY); AUDIT_ARG_VNODE1(vp); VOP_UNLOCK(vp, 0); #endif return (setfmode(td, active_cred, vp, mode)); } int vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, struct thread *td) { struct vnode *vp; vp = fp->f_vnode; #ifdef AUDIT vn_lock(vp, LK_SHARED | LK_RETRY); AUDIT_ARG_VNODE1(vp); VOP_UNLOCK(vp, 0); #endif return (setfown(td, active_cred, vp, uid, gid)); } void vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end) { vm_object_t object; if ((object = vp->v_object) == NULL) return; VM_OBJECT_WLOCK(object); vm_object_page_remove(object, start, end, 0); VM_OBJECT_WUNLOCK(object); } int vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred) { struct vattr va; daddr_t bn, bnp; uint64_t bsize; off_t noff; int error; KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA, ("Wrong command %lu", cmd)); if (vn_lock(vp, LK_SHARED) != 0) return (EBADF); if (vp->v_type != VREG) { error = ENOTTY; goto unlock; } error = VOP_GETATTR(vp, &va, cred); if (error != 0) goto unlock; noff = *off; if (noff >= va.va_size) { error = ENXIO; goto unlock; } bsize = vp->v_mount->mnt_stat.f_iosize; for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) { error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL); if (error == EOPNOTSUPP) { error = ENOTTY; goto unlock; } if ((bnp == -1 && cmd == FIOSEEKHOLE) || (bnp != -1 && cmd == FIOSEEKDATA)) { noff = bn * bsize; if (noff < *off) noff = *off; goto unlock; } } if (noff > va.va_size) noff = va.va_size; /* noff == va.va_size. There is an implicit hole at the end of file. */ if (cmd == FIOSEEKDATA) error = ENXIO; unlock: VOP_UNLOCK(vp, 0); if (error == 0) *off = noff; return (error); } int vn_seek(struct file *fp, off_t offset, int whence, struct thread *td) { struct ucred *cred; struct vnode *vp; struct vattr vattr; off_t foffset, size; int error, noneg; cred = td->td_ucred; vp = fp->f_vnode; foffset = foffset_lock(fp, 0); noneg = (vp->v_type != VCHR); error = 0; switch (whence) { case L_INCR: if (noneg && (foffset < 0 || (offset > 0 && foffset > OFF_MAX - offset))) { error = EOVERFLOW; break; } offset += foffset; break; case L_XTND: vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_GETATTR(vp, &vattr, cred); VOP_UNLOCK(vp, 0); if (error) break; /* * If the file references a disk device, then fetch * the media size and use that to determine the ending * offset. 
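 * For example, lseek(fd, 0, SEEK_END) on a raw disk device yields a
 * zero va_size from VOP_GETATTR(), so the media size obtained through
 * the DIOCGMEDIASIZE ioctl below stands in as the effective file size.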
*/ if (vattr.va_size == 0 && vp->v_type == VCHR && fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0) vattr.va_size = size; if (noneg && (vattr.va_size > OFF_MAX || (offset > 0 && vattr.va_size > OFF_MAX - offset))) { error = EOVERFLOW; break; } offset += vattr.va_size; break; case L_SET: break; case SEEK_DATA: error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td); break; case SEEK_HOLE: error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td); break; default: error = EINVAL; } if (error == 0 && noneg && offset < 0) error = EINVAL; if (error != 0) goto drop; VFS_KNOTE_UNLOCKED(vp, 0); td->td_uretoff.tdu_off = offset; drop: foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); return (error); } int vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct thread *td) { int error; /* * Grant permission if the caller is the owner of the file, or * the super-user, or has ACL_WRITE_ATTRIBUTES permission * on the file. If the time pointer is null, then write * permission on the file is also sufficient. * * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes: * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES * will be allowed to set the times [..] to the current * server time. */ error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td); if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0) error = VOP_ACCESS(vp, VWRITE, cred, td); return (error); } int vn_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { struct vnode *vp; int error; if (fp->f_type == DTYPE_FIFO) kif->kf_type = KF_TYPE_FIFO; else kif->kf_type = KF_TYPE_VNODE; vp = fp->f_vnode; vref(vp); FILEDESC_SUNLOCK(fdp); error = vn_fill_kinfo_vnode(vp, kif); vrele(vp); FILEDESC_SLOCK(fdp); return (error); } static inline void vn_fill_junk(struct kinfo_file *kif) { size_t len, olen; /* * Simulate vn_fullpath returning changing values for a given * vp during e.g. coredump. */ len = (arc4random() % (sizeof(kif->kf_path) - 2)) + 1; olen = strlen(kif->kf_path); if (len < olen) strcpy(&kif->kf_path[len - 1], "$"); else for (; olen < len; olen++) strcpy(&kif->kf_path[olen], "A"); } int vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif) { struct vattr va; char *fullpath, *freepath; int error; kif->kf_vnode_type = vntype_to_kinfo(vp->v_type); freepath = NULL; fullpath = "-"; error = vn_fullpath(curthread, vp, &fullpath, &freepath); if (error == 0) { strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); } if (freepath != NULL) free(freepath, M_TEMP); KFAIL_POINT_CODE(DEBUG_FP, fill_kinfo_vnode__random_path, vn_fill_junk(kif); ); /* * Retrieve vnode attributes.
*/ va.va_fsid = VNOVAL; va.va_rdev = NODEV; vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_GETATTR(vp, &va, curthread->td_ucred); VOP_UNLOCK(vp, 0); if (error != 0) return (error); if (va.va_fsid != VNOVAL) kif->kf_un.kf_file.kf_file_fsid = va.va_fsid; else kif->kf_un.kf_file.kf_file_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; kif->kf_un.kf_file.kf_file_fileid = va.va_fileid; kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode); kif->kf_un.kf_file.kf_file_size = va.va_size; kif->kf_un.kf_file.kf_file_rdev = va.va_rdev; return (0); } int vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { #ifdef HWPMC_HOOKS struct pmckern_map_in pkm; #endif struct mount *mp; struct vnode *vp; vm_object_t object; vm_prot_t maxprot; boolean_t writecounted; int error; #if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \ defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) /* * POSIX shared-memory objects are defined to have * kernel persistence, and are not defined to support * read(2)/write(2) -- or even open(2). Thus, we can * use MAP_ASYNC to trade on-disk coherence for speed. * The shm_open(3) library routine turns on the FPOSIXSHM * flag to request this behavior. */ if ((fp->f_flag & FPOSIXSHM) != 0) flags |= MAP_NOSYNC; #endif vp = fp->f_vnode; /* * Ensure that file and memory protections are * compatible. Note that we only worry about * writability if mapping is shared; in this case, * current and max prot are dictated by the open file. * XXX use the vnode instead? Problem is: what * credentials do we use for determination? What if * proc does a setuid? */ mp = vp->v_mount; if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) maxprot = VM_PROT_NONE; else maxprot = VM_PROT_EXECUTE; if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_READ; else if ((prot & VM_PROT_READ) != 0) return (EACCES); /* * If we are sharing potential changes via MAP_SHARED and we * are trying to get write permission although we opened it * without asking for it, bail out. */ if ((flags & MAP_SHARED) != 0) { if ((fp->f_flag & FWRITE) != 0) maxprot |= VM_PROT_WRITE; else if ((prot & VM_PROT_WRITE) != 0) return (EACCES); } else { maxprot |= VM_PROT_WRITE; cap_maxprot |= VM_PROT_WRITE; } maxprot &= cap_maxprot; writecounted = FALSE; error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, vp, &foff, &object, &writecounted); if (error != 0) return (error); error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, foff, writecounted, td); if (error != 0) { /* * If this mapping was accounted for in the vnode's * writecount, then undo that now. */ if (writecounted) vnode_pager_release_writecount(object, 0, size); vm_object_deallocate(object); } #ifdef HWPMC_HOOKS /* Inform hwpmc(4) if an executable is being mapped. */ if (error == 0 && (prot & VM_PROT_EXECUTE) != 0) { pkm.pm_file = vp; pkm.pm_address = (uintptr_t) *addr; PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm); } #endif return (error); } Index: projects/clang400-import/sys/libkern/bcd.c =================================================================== --- projects/clang400-import/sys/libkern/bcd.c (revision 312719) +++ projects/clang400-import/sys/libkern/bcd.c (revision 312720) @@ -1,38 +1,42 @@ /*- * Some data-tables that are often used. * Cannot be copyrighted. 
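 * For example, bcd2bin(0x42) == 42 and bin2bcd(42) == 0x42 by direct
 * table lookup; the CTASSERTs added below tie the table sizes to the
 * LIBKERN_LEN_* constants used by the bounds-checked accessors.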
*/ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); +#include <sys/param.h> #include <sys/libkern.h> u_char const bcd2bin_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 0, 0, 0, 0, 0, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 0, 0, 0, 0, 0, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 0, 0, 0, 0, 0, 0, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 0, 0, 0, 0, 0, 0, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, 0, 0, 0, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 0, 0, 0, 0, 0, 0, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 0, 0, 0, 0, 0, 0, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 }; +CTASSERT(nitems(bcd2bin_data) == LIBKERN_LEN_BCD2BIN); u_char const bin2bcd_data[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99 }; +CTASSERT(nitems(bin2bcd_data) == LIBKERN_LEN_BIN2BCD); /* This is actually used with radix [2..36] */ char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz"; +CTASSERT(nitems(hex2ascii_data) == LIBKERN_LEN_HEX2ASCII + 1); Index: projects/clang400-import/sys/modules/rtwn/Makefile =================================================================== --- projects/clang400-import/sys/modules/rtwn/Makefile (revision 312719) +++ projects/clang400-import/sys/modules/rtwn/Makefile (revision 312720) @@ -1,45 +1,50 @@ # $FreeBSD$ .PATH: ${.CURDIR}/../../dev/rtwn SYSDIR?=${.CURDIR}/../..
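The three CTASSERTs added to bcd.c above pin each table's element count to the corresponding LIBKERN_LEN_* constant from sys/libkern.h (the hex2ascii check allows one extra element for the terminating NUL), so an edit that changes a table's size now fails at compile time instead of corrupting conversions at run time. A minimal standalone sketch of the same guard; the table, length constant, and main() here are illustrative, not part of the kernel:

#include <stdio.h>

#define LEN_DIGIT2CHAR 10			/* illustrative length constant */

static const char digit2char[] = "0123456789";

/* Build fails if the table and its advertised length ever diverge. */
_Static_assert(sizeof(digit2char) == LEN_DIGIT2CHAR + 1,
    "digit2char length mismatch");		/* +1 for the terminating NUL */

int
main(void)
{
	unsigned bcd = 0x42;	/* BCD 0x42 packs tens = 4, units = 2 */

	printf("%c%c\n", digit2char[bcd >> 4], digit2char[bcd & 0x0f]);
	return (0);
}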
.include "${SYSDIR}/conf/kern.opts.mk" KMOD = if_rtwn SRCS = if_rtwn.c if_rtwn_tx.c if_rtwn_rx.c if_rtwn_beacon.c \ if_rtwn_calib.c if_rtwn_cam.c if_rtwn_task.c if_rtwn_efuse.c \ if_rtwn_fw.c if_rtwn_nop.h if_rtwnreg.h if_rtwnvar.h if_rtwn_tx.h \ if_rtwn_rx.h if_rtwn_beacon.h if_rtwn_calib.h if_rtwn_cam.h \ if_rtwn_task.h if_rtwn_efuse.h if_rtwn_fw.h \ bus_if.h device_if.h \ opt_bus.h opt_rtwn.h opt_wlan.h .PATH: ${.CURDIR}/../../dev/rtwn/rtl8192c SRCS += r92c_attach.c r92c_beacon.c r92c_calib.c r92c_chan.c r92c_fw.c \ - r92c_init.c r92c_rf.c r92c_rom.c r92c_rx.c r92c_tx.c \ + r92c_init.c r92c_llt.c r92c_rf.c r92c_rom.c r92c_rx.c r92c_tx.c \ r92c.h r92c_priv.h r92c_reg.h r92c_var.h r92c_rom_defs.h \ r92c_rom_image.h r92c_fw_cmd.h r92c_rx_desc.h r92c_tx_desc.h .PATH: ${.CURDIR}/../../dev/rtwn/rtl8188e SRCS += r88e_beacon.c r88e_calib.c r88e_chan.c r88e_fw.c r88e_init.c \ r88e_led.c r88e_rf.c r88e_rom.c r88e_rx.c r88e_tx.c r88e.h \ r88e_priv.h r88e_reg.h r88e_rom_defs.h r88e_rom_image.h \ r88e_fw_cmd.h r88e_rx_desc.h r88e_tx_desc.h + +.PATH: ${.CURDIR}/../../dev/rtwn/rtl8192e +SRCS += r92e_chan.c r92e_fw.c r92e_init.c r92e_led.c r92e_rf.c \ + r92e_rom.c r92e_rx.c r92e.h r92e_priv.h r92e_reg.h \ + r92e_rom_image.h r92e_rom_defs.h .PATH: ${.CURDIR}/../../dev/rtwn/rtl8812a SRCS += r12a_beacon.c r12a_calib.c r12a_caps.c r12a_chan.c r12a_fw.c \ r12a_init.c r12a_led.c r12a_rf.c r12a_rom.c r12a_rx.c r12a_tx.c \ r12a.h r12a_priv.h r12a_reg.h r12a_var.h r12a_rom_defs.h \ r12a_rom_image.h r12a_fw_cmd.h r12a_rx_desc.h r12a_tx_desc.h .PATH: ${.CURDIR}/../../dev/rtwn/rtl8821a SRCS += r21a_beacon.c r21a_calib.c r21a_chan.c r21a_fw.c r21a_init.c \ r21a_led.c r21a_rom.c r21a_rx.c r21a.h r21a_priv.h r21a_reg.h opt_rtwn.h: @echo "#define RTWN_DEBUG 1" > ${.TARGET} .if ${MK_SOURCELESS_UCODE} == "no" @echo "#define RTWN_WITHOUT_UCODE 1" >> ${.TARGET} .endif .include <bsd.kmod.mk> Index: projects/clang400-import/sys/modules/rtwn_usb/Makefile =================================================================== --- projects/clang400-import/sys/modules/rtwn_usb/Makefile (revision 312719) +++ projects/clang400-import/sys/modules/rtwn_usb/Makefile (revision 312720) @@ -1,37 +1,40 @@ # $FreeBSD$ .PATH: ${.CURDIR}/../../dev/rtwn/usb SYSDIR?=${.CURDIR}/../..
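The opt_rtwn.h targets above and below generate the driver's option header at build time from the @echo rules; when MK_SOURCELESS_UCODE is "no", the generated file is simply:

/* opt_rtwn.h as emitted by the Makefile rules shown here */
#define RTWN_DEBUG 1
#define RTWN_WITHOUT_UCODE 1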
.include "${SYSDIR}/conf/kern.opts.mk" KMOD = if_rtwn_usb SRCS = rtwn_usb_attach.c rtwn_usb_ep.c rtwn_usb_reg.c rtwn_usb_rx.c \ rtwn_usb_tx.c rtwn_usb_attach.h rtwn_usb_ep.h rtwn_usb_reg.h \ rtwn_usb_rx.h rtwn_usb_tx.h rtwn_usb_var.h \ bus_if.h device_if.h \ opt_bus.h opt_rtwn.h opt_usb.h opt_wlan.h usb_if.h usbdevs.h .PATH: ${.CURDIR}/../../dev/rtwn/rtl8188e/usb SRCS += r88eu_attach.c r88eu_init.c r88eu_rx.c \ r88eu.h r88eu_reg.h .PATH: ${.CURDIR}/../../dev/rtwn/rtl8192c/usb SRCS += r92cu_attach.c r92cu_init.c r92cu_led.c r92cu_rx.c r92cu_tx.c \ r92cu.h r92cu_priv.h r92cu_reg.h r92cu_tx_desc.h +.PATH: ${.CURDIR}/../../dev/rtwn/rtl8192e/usb +SRCS += r92eu_attach.c r92eu_init.c r92eu.h r92eu_reg.h + .PATH: ${.CURDIR}/../../dev/rtwn/rtl8812a/usb SRCS += r12au_attach.c r12au_init.c r12au_rx.c r12au_tx.c \ r12au.h r12au_reg.h r12au_tx_desc.h .PATH: ${.CURDIR}/../../dev/rtwn/rtl8821a/usb SRCS += r21au_attach.c r21au_init.c r21au_dfs.c \ r21au.h r21au_reg.h opt_rtwn.h: @echo "#define RTWN_DEBUG 1" > ${.TARGET} .if ${MK_SOURCELESS_UCODE} == "no" @echo "#define RTWN_WITHOUT_UCODE 1" >> ${.TARGET} .endif .include <bsd.kmod.mk> Index: projects/clang400-import/sys/modules/rtwnfw/Makefile =================================================================== --- projects/clang400-import/sys/modules/rtwnfw/Makefile (revision 312719) +++ projects/clang400-import/sys/modules/rtwnfw/Makefile (revision 312720) @@ -1,6 +1,6 @@ # $FreeBSD$ -SUBDIR= rtwnrtl8188eu rtwnrtl8192cT rtwnrtl8192cU rtwnrtl8812au rtwnrtl8821au \ - rtwnrtl8192cE rtwnrtl8192cEB +SUBDIR= rtwnrtl8188eu rtwnrtl8192cT rtwnrtl8192cU rtwnrtl8192eu rtwnrtl8812au \ + rtwnrtl8821au rtwnrtl8192cE rtwnrtl8192cEB .include <bsd.subdir.mk> Index: projects/clang400-import/sys/modules/rtwnfw/rtwnrtl8192eu/Makefile =================================================================== --- projects/clang400-import/sys/modules/rtwnfw/rtwnrtl8192eu/Makefile (nonexistent) +++ projects/clang400-import/sys/modules/rtwnfw/rtwnrtl8192eu/Makefile (revision 312720) @@ -0,0 +1,6 @@ +# $FreeBSD$ + +KMOD= rtwn-rtl8192eufw +IMG= rtwn-rtl8192eufw + +.include <bsd.kmod.mk> Property changes on: projects/clang400-import/sys/modules/rtwnfw/rtwnrtl8192eu/Makefile ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/sys/net/ieee8023ad_lacp.c =================================================================== --- projects/clang400-import/sys/net/ieee8023ad_lacp.c (revision 312719) +++ projects/clang400-import/sys/net/ieee8023ad_lacp.c (revision 312720) @@ -1,2100 +1,2095 @@ /* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */ /*- * Copyright (c)2005 YAMAMOTO Takashi, * Copyright (c)2008 Andrew Thompson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ratelimit.h" #include #include #include #include #include #include #include /* hz */ #include /* for net/if.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * actor system priority and port priority. * XXX should be configurable. */ #define LACP_SYSTEM_PRIO 0x8000 #define LACP_PORT_PRIO 0x8000 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 }; static const struct tlv_template lacp_info_tlv_template[] = { { LACP_TYPE_ACTORINFO, sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, { LACP_TYPE_PARTNERINFO, sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, { LACP_TYPE_COLLECTORINFO, sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) }, { 0, 0 }, }; static const struct tlv_template marker_info_tlv_template[] = { { MARKER_TYPE_INFO, sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, { 0, 0 }, }; static const struct tlv_template marker_response_tlv_template[] = { { MARKER_TYPE_RESPONSE, sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, { 0, 0 }, }; typedef void (*lacp_timer_func_t)(struct lacp_port *); static void lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *); static void lacp_fill_markerinfo(struct lacp_port *, struct lacp_markerinfo *); static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *); static void lacp_suppress_distributing(struct lacp_softc *, struct lacp_aggregator *); static void lacp_transit_expire(void *); static void lacp_update_portmap(struct lacp_softc *); static void lacp_select_active_aggregator(struct lacp_softc *); static uint16_t lacp_compose_key(struct lacp_port *); static int tlv_check(const void *, size_t, const struct tlvhdr *, const struct tlv_template *, boolean_t); static void lacp_tick(void *); static void lacp_fill_aggregator_id(struct lacp_aggregator *, const struct lacp_port *); static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *, const struct lacp_peerinfo *); static int lacp_aggregator_is_compatible(const struct lacp_aggregator *, const struct lacp_port *); static int lacp_peerinfo_is_compatible(const struct lacp_peerinfo *, const struct lacp_peerinfo *); static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *, struct lacp_port *); static void lacp_aggregator_addref(struct lacp_softc *, struct lacp_aggregator *); static void lacp_aggregator_delref(struct lacp_softc *, struct lacp_aggregator *); /* receive machine */ static int lacp_pdu_input(struct lacp_port *, struct mbuf *); static int lacp_marker_input(struct lacp_port *, struct mbuf *); static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *); static void lacp_sm_rx_timer(struct 
lacp_port *); static void lacp_sm_rx_set_expired(struct lacp_port *); static void lacp_sm_rx_update_ntt(struct lacp_port *, const struct lacpdu *); static void lacp_sm_rx_record_pdu(struct lacp_port *, const struct lacpdu *); static void lacp_sm_rx_update_selected(struct lacp_port *, const struct lacpdu *); static void lacp_sm_rx_record_default(struct lacp_port *); static void lacp_sm_rx_update_default_selected(struct lacp_port *); static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *, const struct lacp_peerinfo *); /* mux machine */ static void lacp_sm_mux(struct lacp_port *); static void lacp_set_mux(struct lacp_port *, enum lacp_mux_state); static void lacp_sm_mux_timer(struct lacp_port *); /* periodic transmit machine */ static void lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t); static void lacp_sm_ptx_tx_schedule(struct lacp_port *); static void lacp_sm_ptx_timer(struct lacp_port *); /* transmit machine */ static void lacp_sm_tx(struct lacp_port *); static void lacp_sm_assert_ntt(struct lacp_port *); static void lacp_run_timers(struct lacp_port *); static int lacp_compare_peerinfo(const struct lacp_peerinfo *, const struct lacp_peerinfo *); static int lacp_compare_systemid(const struct lacp_systemid *, const struct lacp_systemid *); static void lacp_port_enable(struct lacp_port *); static void lacp_port_disable(struct lacp_port *); static void lacp_select(struct lacp_port *); static void lacp_unselect(struct lacp_port *); static void lacp_disable_collecting(struct lacp_port *); static void lacp_enable_collecting(struct lacp_port *); static void lacp_disable_distributing(struct lacp_port *); static void lacp_enable_distributing(struct lacp_port *); static int lacp_xmit_lacpdu(struct lacp_port *); static int lacp_xmit_marker(struct lacp_port *); /* Debugging */ static void lacp_dump_lacpdu(const struct lacpdu *); static const char *lacp_format_partner(const struct lacp_peerinfo *, char *, size_t); static const char *lacp_format_lagid(const struct lacp_peerinfo *, const struct lacp_peerinfo *, char *, size_t); static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *, char *, size_t); static const char *lacp_format_state(uint8_t, char *, size_t); static const char *lacp_format_mac(const uint8_t *, char *, size_t); static const char *lacp_format_systemid(const struct lacp_systemid *, char *, size_t); static const char *lacp_format_portid(const struct lacp_portid *, char *, size_t); static void lacp_dprintf(const struct lacp_port *, const char *, ...) __attribute__((__format__(__printf__, 2, 3))); static VNET_DEFINE(int, lacp_debug); #define V_lacp_debug VNET(lacp_debug) SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad"); SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)"); static VNET_DEFINE(int, lacp_default_strict_mode) = 1; SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode, CTLFLAG_RWTUN, &VNET_NAME(lacp_default_strict_mode), 0, "LACP strict protocol compliance default"); #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; } #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); } #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; } /* * partner administration variables. * XXX should be configurable. 
*/ static const struct lacp_peerinfo lacp_partner_admin_optimistic = { .lip_systemid = { .lsi_prio = 0xffff }, .lip_portid = { .lpi_prio = 0xffff }, .lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION | LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING, }; static const struct lacp_peerinfo lacp_partner_admin_strict = { .lip_systemid = { .lsi_prio = 0xffff }, .lip_portid = { .lpi_prio = 0xffff }, .lip_state = 0, }; static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = { [LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer, [LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer, [LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer, }; struct mbuf * lacp_input(struct lagg_port *lgp, struct mbuf *m) { struct lacp_port *lp = LACP_PORT(lgp); uint8_t subtype; if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) { m_freem(m); return (NULL); } m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype); switch (subtype) { case SLOWPROTOCOLS_SUBTYPE_LACP: lacp_pdu_input(lp, m); return (NULL); case SLOWPROTOCOLS_SUBTYPE_MARKER: lacp_marker_input(lp, m); return (NULL); } /* Not a subtype we are interested in */ return (m); } /* * lacp_pdu_input: process lacpdu */ static int lacp_pdu_input(struct lacp_port *lp, struct mbuf *m) { struct lacp_softc *lsc = lp->lp_lsc; struct lacpdu *du; int error = 0; if (m->m_pkthdr.len != sizeof(*du)) { goto bad; } if ((m->m_flags & M_MCAST) == 0) { goto bad; } if (m->m_len < sizeof(*du)) { m = m_pullup(m, sizeof(*du)); if (m == NULL) { return (ENOMEM); } } du = mtod(m, struct lacpdu *); if (memcmp(&du->ldu_eh.ether_dhost, &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) { goto bad; } /* * ignore the version for compatibility with * the future protocol revisions. */ #if 0 if (du->ldu_sph.sph_version != 1) { goto bad; } #endif /* * ignore tlv types for compatibility with * the future protocol revisions.
*/ if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor, lacp_info_tlv_template, FALSE)) { goto bad; } if (V_lacp_debug > 0) { lacp_dprintf(lp, "lacpdu receive\n"); lacp_dump_lacpdu(du); } if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) { LACP_TPRINTF((lp, "Dropping RX PDU\n")); goto bad; } LACP_LOCK(lsc); lacp_sm_rx(lp, du); LACP_UNLOCK(lsc); m_freem(m); return (error); bad: m_freem(m); return (EINVAL); } static void lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info) { struct lagg_port *lgp = lp->lp_lagg; struct lagg_softc *sc = lgp->lp_softc; info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO); memcpy(&info->lip_systemid.lsi_mac, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO); info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index); info->lip_state = lp->lp_state; } static void lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info) { struct ifnet *ifp = lp->lp_ifp; /* Fill in the port index and system id (encoded as the MAC) */ info->mi_rq_port = htons(ifp->if_index); memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN); info->mi_rq_xid = htonl(0); } static int lacp_xmit_lacpdu(struct lacp_port *lp) { struct lagg_port *lgp = lp->lp_lagg; struct mbuf *m; struct lacpdu *du; int error; LACP_LOCK_ASSERT(lp->lp_lsc); m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { return (ENOMEM); } m->m_len = m->m_pkthdr.len = sizeof(*du); du = mtod(m, struct lacpdu *); memset(du, 0, sizeof(*du)); memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN); memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN); du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW); du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP; du->ldu_sph.sph_version = 1; TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor)); du->ldu_actor = lp->lp_actor; TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO, sizeof(du->ldu_partner)); du->ldu_partner = lp->lp_partner; TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO, sizeof(du->ldu_collector)); du->ldu_collector.lci_maxdelay = 0; if (V_lacp_debug > 0) { lacp_dprintf(lp, "lacpdu transmit\n"); lacp_dump_lacpdu(du); } m->m_flags |= M_MCAST; /* * XXX should use higher priority queue. * otherwise network congestion can break aggregation. 
*/ error = lagg_enqueue(lp->lp_ifp, m); return (error); } static int lacp_xmit_marker(struct lacp_port *lp) { struct lagg_port *lgp = lp->lp_lagg; struct mbuf *m; struct markerdu *mdu; int error; LACP_LOCK_ASSERT(lp->lp_lsc); m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { return (ENOMEM); } m->m_len = m->m_pkthdr.len = sizeof(*mdu); mdu = mtod(m, struct markerdu *); memset(mdu, 0, sizeof(*mdu)); memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN); memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN); mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW); mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER; mdu->mdu_sph.sph_version = 1; /* Bump the transaction id and copy over the marker info */ lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1); TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info)); mdu->mdu_info = lp->lp_marker; LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n", ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":", ntohl(mdu->mdu_info.mi_rq_xid))); m->m_flags |= M_MCAST; error = lagg_enqueue(lp->lp_ifp, m); return (error); } void lacp_linkstate(struct lagg_port *lgp) { struct lacp_port *lp = LACP_PORT(lgp); struct lacp_softc *lsc = lp->lp_lsc; struct ifnet *ifp = lgp->lp_ifp; struct ifmediareq ifmr; int error = 0; u_int media; uint8_t old_state; uint16_t old_key; bzero((char *)&ifmr, sizeof(ifmr)); error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr); if (error != 0) return; LACP_LOCK(lsc); media = ifmr.ifm_active; LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, " "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER, (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP)); old_state = lp->lp_state; old_key = lp->lp_key; lp->lp_media = media; /* * If the port is not an active full duplex Ethernet link then it can * not be aggregated. 
*/ if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 || ifp->if_link_state != LINK_STATE_UP) { lacp_port_disable(lp); } else { lacp_port_enable(lp); } lp->lp_key = lacp_compose_key(lp); if (old_state != lp->lp_state || old_key != lp->lp_key) { LACP_DPRINTF((lp, "-> UNSELECTED\n")); lp->lp_selected = LACP_UNSELECTED; } LACP_UNLOCK(lsc); } static void lacp_tick(void *arg) { struct lacp_softc *lsc = arg; struct lacp_port *lp; LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) { if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) continue; CURVNET_SET(lp->lp_ifp->if_vnet); lacp_run_timers(lp); lacp_select(lp); lacp_sm_mux(lp); lacp_sm_tx(lp); lacp_sm_ptx_tx_schedule(lp); CURVNET_RESTORE(); } callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc); } int lacp_port_create(struct lagg_port *lgp) { struct lagg_softc *sc = lgp->lp_softc; struct lacp_softc *lsc = LACP_SOFTC(sc); struct lacp_port *lp; struct ifnet *ifp = lgp->lp_ifp; struct sockaddr_dl sdl; struct ifmultiaddr *rifma = NULL; int error; - boolean_t active = TRUE; /* XXX should be configurable */ - boolean_t fast = FALSE; /* Configurable via ioctl */ - link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER); sdl.sdl_alen = ETHER_ADDR_LEN; bcopy(&ethermulticastaddr_slowprotocols, LLADDR(&sdl), ETHER_ADDR_LEN); error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma); if (error) { printf("%s: ADDMULTI failed on %s\n", __func__, lgp->lp_ifp->if_xname); return (error); } lp = malloc(sizeof(struct lacp_port), M_DEVBUF, M_NOWAIT|M_ZERO); if (lp == NULL) return (ENOMEM); LACP_LOCK(lsc); lgp->lp_psc = lp; lp->lp_ifp = ifp; lp->lp_lagg = lgp; lp->lp_lsc = lsc; lp->lp_ifma = rifma; LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next); lacp_fill_actorinfo(lp, &lp->lp_actor); lacp_fill_markerinfo(lp, &lp->lp_marker); - lp->lp_state = - (active ? LACP_STATE_ACTIVITY : 0) | - (fast ? LACP_STATE_TIMEOUT : 0); + lp->lp_state = LACP_STATE_ACTIVITY; lp->lp_aggregator = NULL; lacp_sm_rx_set_expired(lp); LACP_UNLOCK(lsc); lacp_linkstate(lgp); return (0); } void lacp_port_destroy(struct lagg_port *lgp) { struct lacp_port *lp = LACP_PORT(lgp); struct lacp_softc *lsc = lp->lp_lsc; int i; LACP_LOCK(lsc); for (i = 0; i < LACP_NTIMER; i++) { LACP_TIMER_DISARM(lp, i); } lacp_disable_collecting(lp); lacp_disable_distributing(lp); lacp_unselect(lp); LIST_REMOVE(lp, lp_next); LACP_UNLOCK(lsc); /* The address may have already been removed by if_purgemaddrs() */ if (!lgp->lp_detaching) if_delmulti_ifma(lp->lp_ifma); free(lp, M_DEVBUF); } void lacp_req(struct lagg_softc *sc, void *data) { struct lacp_opreq *req = (struct lacp_opreq *)data; struct lacp_softc *lsc = LACP_SOFTC(sc); struct lacp_aggregator *la; bzero(req, sizeof(struct lacp_opreq)); /* * If the LACP softc is NULL, return with the opreq structure full of * zeros. It is normal for the softc to be NULL while the lagg is * being destroyed.
*/ if (NULL == lsc) return; la = lsc->lsc_active_aggregator; LACP_LOCK(lsc); if (la != NULL) { req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio); memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac, ETHER_ADDR_LEN); req->actor_key = ntohs(la->la_actor.lip_key); req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio); req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno); req->actor_state = la->la_actor.lip_state; req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio); memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac, ETHER_ADDR_LEN); req->partner_key = ntohs(la->la_partner.lip_key); req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio); req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno); req->partner_state = la->la_partner.lip_state; } LACP_UNLOCK(lsc); } void lacp_portreq(struct lagg_port *lgp, void *data) { struct lacp_opreq *req = (struct lacp_opreq *)data; struct lacp_port *lp = LACP_PORT(lgp); struct lacp_softc *lsc = lp->lp_lsc; LACP_LOCK(lsc); req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio); memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac, ETHER_ADDR_LEN); req->actor_key = ntohs(lp->lp_actor.lip_key); req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio); req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno); req->actor_state = lp->lp_actor.lip_state; req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio); memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac, ETHER_ADDR_LEN); req->partner_key = ntohs(lp->lp_partner.lip_key); req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio); req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno); req->partner_state = lp->lp_partner.lip_state; LACP_UNLOCK(lsc); } static void lacp_disable_collecting(struct lacp_port *lp) { LACP_DPRINTF((lp, "collecting disabled\n")); lp->lp_state &= ~LACP_STATE_COLLECTING; } static void lacp_enable_collecting(struct lacp_port *lp) { LACP_DPRINTF((lp, "collecting enabled\n")); lp->lp_state |= LACP_STATE_COLLECTING; } static void lacp_disable_distributing(struct lacp_port *lp) { struct lacp_aggregator *la = lp->lp_aggregator; struct lacp_softc *lsc = lp->lp_lsc; struct lagg_softc *sc = lsc->lsc_softc; char buf[LACP_LAGIDSTR_MAX+1]; LACP_LOCK_ASSERT(lsc); if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) { return; } KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports")); KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports)); KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid")); LACP_DPRINTF((lp, "disable distributing on aggregator %s, " "nports %d -> %d\n", lacp_format_lagid_aggregator(la, buf, sizeof(buf)), la->la_nports, la->la_nports - 1)); TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q); la->la_nports--; sc->sc_active = la->la_nports; if (lsc->lsc_active_aggregator == la) { lacp_suppress_distributing(lsc, la); lacp_select_active_aggregator(lsc); /* regenerate the port map, the active aggregator has changed */ lacp_update_portmap(lsc); } lp->lp_state &= ~LACP_STATE_DISTRIBUTING; } static void lacp_enable_distributing(struct lacp_port *lp) { struct lacp_aggregator *la = lp->lp_aggregator; struct lacp_softc *lsc = lp->lp_lsc; struct lagg_softc *sc = lsc->lsc_softc; char buf[LACP_LAGIDSTR_MAX+1]; LACP_LOCK_ASSERT(lsc); if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) { return; } LACP_DPRINTF((lp, "enable distributing on aggregator %s, " "nports %d -> %d\n", lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 
la->la_nports, la->la_nports + 1)); KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid")); TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q); la->la_nports++; sc->sc_active = la->la_nports; lp->lp_state |= LACP_STATE_DISTRIBUTING; if (lsc->lsc_active_aggregator == la) { lacp_suppress_distributing(lsc, la); lacp_update_portmap(lsc); } else /* try to become the active aggregator */ lacp_select_active_aggregator(lsc); } static void lacp_transit_expire(void *vp) { struct lacp_softc *lsc = vp; LACP_LOCK_ASSERT(lsc); CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet); LACP_TRACE(NULL); CURVNET_RESTORE(); lsc->lsc_suppress_distributing = FALSE; } void lacp_attach(struct lagg_softc *sc) { struct lacp_softc *lsc; lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_psc = lsc; lsc->lsc_softc = sc; lsc->lsc_hashkey = m_ether_tcpip_hash_init(); lsc->lsc_active_aggregator = NULL; lsc->lsc_strict_mode = VNET(lacp_default_strict_mode); LACP_LOCK_INIT(lsc); TAILQ_INIT(&lsc->lsc_aggregators); LIST_INIT(&lsc->lsc_ports); callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0); callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0); /* if the lagg is already up then do the same */ if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) lacp_init(sc); } void lacp_detach(void *psc) { struct lacp_softc *lsc = (struct lacp_softc *)psc; KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators), ("aggregators still active")); KASSERT(lsc->lsc_active_aggregator == NULL, ("aggregator still attached")); callout_drain(&lsc->lsc_transit_callout); callout_drain(&lsc->lsc_callout); LACP_LOCK_DESTROY(lsc); free(lsc, M_DEVBUF); } void lacp_init(struct lagg_softc *sc) { struct lacp_softc *lsc = LACP_SOFTC(sc); LACP_LOCK(lsc); callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc); LACP_UNLOCK(lsc); } void lacp_stop(struct lagg_softc *sc) { struct lacp_softc *lsc = LACP_SOFTC(sc); LACP_LOCK(lsc); callout_stop(&lsc->lsc_transit_callout); callout_stop(&lsc->lsc_callout); LACP_UNLOCK(lsc); } struct lagg_port * lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m) { struct lacp_softc *lsc = LACP_SOFTC(sc); struct lacp_portmap *pm; struct lacp_port *lp; uint32_t hash; if (__predict_false(lsc->lsc_suppress_distributing)) { LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__)); return (NULL); } pm = &lsc->lsc_pmap[lsc->lsc_activemap]; if (pm->pm_count == 0) { LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__)); return (NULL); } if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) hash = m->m_pkthdr.flowid >> sc->flowid_shift; else hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey); hash %= pm->pm_count; lp = pm->pm_map[hash]; KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0, ("aggregated port is not distributing")); return (lp->lp_lagg); } #ifdef RATELIMIT struct lagg_port * lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t flowid) { struct lacp_softc *lsc = LACP_SOFTC(sc); struct lacp_portmap *pm; struct lacp_port *lp; uint32_t hash; if (__predict_false(lsc->lsc_suppress_distributing)) { LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__)); return (NULL); } pm = &lsc->lsc_pmap[lsc->lsc_activemap]; if (pm->pm_count == 0) { LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__)); return (NULL); } hash = flowid >> sc->flowid_shift; hash %= pm->pm_count; lp = pm->pm_map[hash]; return (lp->lp_lagg); } #endif /* * lacp_suppress_distributing: drop transmit packets for a while * to preserve packet ordering. 
*/ static void lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la) { struct lacp_port *lp; if (lsc->lsc_active_aggregator != la) { return; } LACP_TRACE(NULL); lsc->lsc_suppress_distributing = TRUE; /* send a marker frame down each port to verify the queues are empty */ LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) { lp->lp_flags |= LACP_PORT_MARK; lacp_xmit_marker(lp); } /* set a timeout for the marker frames */ callout_reset(&lsc->lsc_transit_callout, LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc); } static int lacp_compare_peerinfo(const struct lacp_peerinfo *a, const struct lacp_peerinfo *b) { return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state))); } static int lacp_compare_systemid(const struct lacp_systemid *a, const struct lacp_systemid *b) { return (memcmp(a, b, sizeof(*a))); } #if 0 /* unused */ static int lacp_compare_portid(const struct lacp_portid *a, const struct lacp_portid *b) { return (memcmp(a, b, sizeof(*a))); } #endif static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *la) { struct lacp_port *lp; uint64_t speed; lp = TAILQ_FIRST(&la->la_ports); if (lp == NULL) { return (0); } speed = ifmedia_baudrate(lp->lp_media); speed *= la->la_nports; if (speed == 0) { LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n", lp->lp_media, la->la_nports)); } return (speed); } /* * lacp_select_active_aggregator: select an aggregator to be used to transmit * packets from lagg(4) interface. */ static void lacp_select_active_aggregator(struct lacp_softc *lsc) { struct lacp_aggregator *la; struct lacp_aggregator *best_la = NULL; uint64_t best_speed = 0; char buf[LACP_LAGIDSTR_MAX+1]; LACP_TRACE(NULL); TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { uint64_t speed; if (la->la_nports == 0) { continue; } speed = lacp_aggregator_bandwidth(la); LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n", lacp_format_lagid_aggregator(la, buf, sizeof(buf)), speed, la->la_nports)); /* * This aggregator is chosen if the partner has a better * system priority or, the total aggregated speed is higher * or, it is already the chosen aggregator */ if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) < LACP_SYS_PRI(best_la->la_partner)) || speed > best_speed || (speed == best_speed && la == lsc->lsc_active_aggregator)) { best_la = la; best_speed = speed; } } KASSERT(best_la == NULL || best_la->la_nports > 0, ("invalid aggregator refcnt")); KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports), ("invalid aggregator list")); if (lsc->lsc_active_aggregator != best_la) { LACP_DPRINTF((NULL, "active aggregator changed\n")); LACP_DPRINTF((NULL, "old %s\n", lacp_format_lagid_aggregator(lsc->lsc_active_aggregator, buf, sizeof(buf)))); } else { LACP_DPRINTF((NULL, "active aggregator not changed\n")); } LACP_DPRINTF((NULL, "new %s\n", lacp_format_lagid_aggregator(best_la, buf, sizeof(buf)))); if (lsc->lsc_active_aggregator != best_la) { lsc->lsc_active_aggregator = best_la; lacp_update_portmap(lsc); if (best_la) { lacp_suppress_distributing(lsc, best_la); } } } /* * Updated the inactive portmap array with the new list of ports and * make it live. */ static void lacp_update_portmap(struct lacp_softc *lsc) { struct lagg_softc *sc = lsc->lsc_softc; struct lacp_aggregator *la; struct lacp_portmap *p; struct lacp_port *lp; uint64_t speed; u_int newmap; int i; newmap = lsc->lsc_activemap == 0 ? 
1 : 0; p = &lsc->lsc_pmap[newmap]; la = lsc->lsc_active_aggregator; speed = 0; bzero(p, sizeof(struct lacp_portmap)); if (la != NULL && la->la_nports > 0) { p->pm_count = la->la_nports; i = 0; TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) p->pm_map[i++] = lp; KASSERT(i == p->pm_count, ("Invalid port count")); speed = lacp_aggregator_bandwidth(la); } sc->sc_ifp->if_baudrate = speed; /* switch the active portmap over */ atomic_store_rel_int(&lsc->lsc_activemap, newmap); LACP_DPRINTF((NULL, "Set table %d with %d ports\n", lsc->lsc_activemap, lsc->lsc_pmap[lsc->lsc_activemap].pm_count)); } static uint16_t lacp_compose_key(struct lacp_port *lp) { struct lagg_port *lgp = lp->lp_lagg; struct lagg_softc *sc = lgp->lp_softc; u_int media = lp->lp_media; uint16_t key; if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) { /* * non-aggregatable links should have unique keys. * * XXX this isn't really unique as if_index is 16 bit. */ /* bit 0..14: (some bits of) if_index of this port */ key = lp->lp_ifp->if_index; /* bit 15: 1 */ key |= 0x8000; } else { u_int subtype = IFM_SUBTYPE(media); KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type")); KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface")); /* bit 0..4: IFM_SUBTYPE modulo speed */ switch (subtype) { case IFM_10_T: case IFM_10_2: case IFM_10_5: case IFM_10_STP: case IFM_10_FL: key = IFM_10_T; break; case IFM_100_TX: case IFM_100_FX: case IFM_100_T4: case IFM_100_VG: case IFM_100_T2: case IFM_100_T: key = IFM_100_TX; break; case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: case IFM_1000_KX: case IFM_1000_SGMII: case IFM_1000_CX_SGMII: key = IFM_1000_SX; break; case IFM_10G_LR: case IFM_10G_SR: case IFM_10G_CX4: case IFM_10G_TWINAX: case IFM_10G_TWINAX_LONG: case IFM_10G_LRM: case IFM_10G_T: case IFM_10G_KX4: case IFM_10G_KR: case IFM_10G_CR1: case IFM_10G_ER: case IFM_10G_SFI: key = IFM_10G_LR; break; case IFM_20G_KR2: key = IFM_20G_KR2; break; case IFM_2500_KX: case IFM_2500_T: key = IFM_2500_KX; break; case IFM_5000_T: key = IFM_5000_T; break; case IFM_50G_PCIE: case IFM_50G_CR2: case IFM_50G_KR2: key = IFM_50G_PCIE; break; case IFM_56G_R4: key = IFM_56G_R4; break; case IFM_25G_PCIE: case IFM_25G_CR: case IFM_25G_KR: case IFM_25G_SR: key = IFM_25G_PCIE; break; case IFM_40G_CR4: case IFM_40G_SR4: case IFM_40G_LR4: case IFM_40G_XLPPI: case IFM_40G_KR4: key = IFM_40G_CR4; break; case IFM_100G_CR4: case IFM_100G_SR4: case IFM_100G_KR4: case IFM_100G_LR4: key = IFM_100G_CR4; break; default: key = subtype; break; } /* bit 5..14: (some bits of) if_index of lagg device */ key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5); /* bit 15: 0 */ } return (htons(key)); } static void lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la) { char buf[LACP_LAGIDSTR_MAX+1]; LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", __func__, lacp_format_lagid(&la->la_actor, &la->la_partner, buf, sizeof(buf)), la->la_refcnt, la->la_refcnt + 1)); KASSERT(la->la_refcnt > 0, ("refcount <= 0")); la->la_refcnt++; KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount")); } static void lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la) { char buf[LACP_LAGIDSTR_MAX+1]; LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", __func__, lacp_format_lagid(&la->la_actor, &la->la_partner, buf, sizeof(buf)), la->la_refcnt, la->la_refcnt - 1)); KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt")); la->la_refcnt--; if (la->la_refcnt > 0) { return; } KASSERT(la->la_refcnt == 0, ("refcount not zero")); 
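/*
 * An aggregator is created with la_refcnt == 1 by lacp_aggregator_get()
 * and gains one reference for each additional port that selects it, so
 * la_refcnt counts selected ports while la_nports counts only the ports
 * currently distributing; that is why la_refcnt >= la_nports is asserted
 * around the distributing list.  A refcount of zero here means the last
 * selected port has let go, and the aggregator is unlinked and freed below.
 */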
KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active")); TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q); free(la, M_DEVBUF); } /* * lacp_aggregator_get: allocate an aggregator. */ static struct lacp_aggregator * lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp) { struct lacp_aggregator *la; la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT); if (la) { la->la_refcnt = 1; la->la_nports = 0; TAILQ_INIT(&la->la_ports); la->la_pending = 0; TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q); } return (la); } /* * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port. */ static void lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp) { lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner); lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor); la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION; } static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr, const struct lacp_peerinfo *lpi_port) { memset(lpi_aggr, 0, sizeof(*lpi_aggr)); lpi_aggr->lip_systemid = lpi_port->lip_systemid; lpi_aggr->lip_key = lpi_port->lip_key; } /* * lacp_aggregator_is_compatible: check if a port can join to an aggregator. */ static int lacp_aggregator_is_compatible(const struct lacp_aggregator *la, const struct lacp_port *lp) { if (!(lp->lp_state & LACP_STATE_AGGREGATION) || !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) { return (0); } if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) { return (0); } if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) { return (0); } if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) { return (0); } return (1); } static int lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a, const struct lacp_peerinfo *b) { if (memcmp(&a->lip_systemid, &b->lip_systemid, sizeof(a->lip_systemid))) { return (0); } if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) { return (0); } return (1); } static void lacp_port_enable(struct lacp_port *lp) { lp->lp_state |= LACP_STATE_AGGREGATION; } static void lacp_port_disable(struct lacp_port *lp) { lacp_set_mux(lp, LACP_MUX_DETACHED); lp->lp_state &= ~LACP_STATE_AGGREGATION; lp->lp_selected = LACP_UNSELECTED; lacp_sm_rx_record_default(lp); lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION; lp->lp_state &= ~LACP_STATE_EXPIRED; } /* * lacp_select: select an aggregator. create one if necessary. */ static void lacp_select(struct lacp_port *lp) { struct lacp_softc *lsc = lp->lp_lsc; struct lacp_aggregator *la; char buf[LACP_LAGIDSTR_MAX+1]; if (lp->lp_aggregator) { return; } KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), ("timer_wait_while still active")); LACP_DPRINTF((lp, "port lagid=%s\n", lacp_format_lagid(&lp->lp_actor, &lp->lp_partner, buf, sizeof(buf)))); TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { if (lacp_aggregator_is_compatible(la, lp)) { break; } } if (la == NULL) { la = lacp_aggregator_get(lsc, lp); if (la == NULL) { LACP_DPRINTF((lp, "aggregator creation failed\n")); /* * will retry on the next tick. */ return; } lacp_fill_aggregator_id(la, lp); LACP_DPRINTF((lp, "aggregator created\n")); } else { LACP_DPRINTF((lp, "compatible aggregator found\n")); if (la->la_refcnt == LACP_MAX_PORTS) return; lacp_aggregator_addref(lsc, la); } LACP_DPRINTF((lp, "aggregator lagid=%s\n", lacp_format_lagid(&la->la_actor, &la->la_partner, buf, sizeof(buf)))); lp->lp_aggregator = la; lp->lp_selected = LACP_SELECTED; } /* * lacp_unselect: finish unselect/detach process. 
*/ static void lacp_unselect(struct lacp_port *lp) { struct lacp_softc *lsc = lp->lp_lsc; struct lacp_aggregator *la = lp->lp_aggregator; KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), ("timer_wait_while still active")); if (la == NULL) { return; } lp->lp_aggregator = NULL; lacp_aggregator_delref(lsc, la); } /* mux machine */ static void lacp_sm_mux(struct lacp_port *lp) { struct lagg_port *lgp = lp->lp_lagg; struct lagg_softc *sc = lgp->lp_softc; enum lacp_mux_state new_state; boolean_t p_sync = (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0; boolean_t p_collecting = (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0; enum lacp_selected selected = lp->lp_selected; struct lacp_aggregator *la; if (V_lacp_debug > 1) lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, " "p_sync= 0x%x, p_collecting= 0x%x\n", __func__, lp->lp_mux_state, selected, p_sync, p_collecting); re_eval: la = lp->lp_aggregator; KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL, ("MUX not detached")); new_state = lp->lp_mux_state; switch (lp->lp_mux_state) { case LACP_MUX_DETACHED: if (selected != LACP_UNSELECTED) { new_state = LACP_MUX_WAITING; } break; case LACP_MUX_WAITING: KASSERT(la->la_pending > 0 || !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), ("timer_wait_while still active")); if (selected == LACP_SELECTED && la->la_pending == 0) { new_state = LACP_MUX_ATTACHED; } else if (selected == LACP_UNSELECTED) { new_state = LACP_MUX_DETACHED; } break; case LACP_MUX_ATTACHED: if (selected == LACP_SELECTED && p_sync) { new_state = LACP_MUX_COLLECTING; } else if (selected != LACP_SELECTED) { new_state = LACP_MUX_DETACHED; } break; case LACP_MUX_COLLECTING: if (selected == LACP_SELECTED && p_sync && p_collecting) { new_state = LACP_MUX_DISTRIBUTING; } else if (selected != LACP_SELECTED || !p_sync) { new_state = LACP_MUX_ATTACHED; } break; case LACP_MUX_DISTRIBUTING: if (selected != LACP_SELECTED || !p_sync || !p_collecting) { new_state = LACP_MUX_COLLECTING; lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n"); sc->sc_flapping++; } break; default: panic("%s: unknown state", __func__); } if (lp->lp_mux_state == new_state) { return; } lacp_set_mux(lp, new_state); goto re_eval; } static void lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state) { struct lacp_aggregator *la = lp->lp_aggregator; if (lp->lp_mux_state == new_state) { return; } switch (new_state) { case LACP_MUX_DETACHED: lp->lp_state &= ~LACP_STATE_SYNC; lacp_disable_distributing(lp); lacp_disable_collecting(lp); lacp_sm_assert_ntt(lp); /* cancel timer */ if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) { KASSERT(la->la_pending > 0, ("timer_wait_while not active")); la->la_pending--; } LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE); lacp_unselect(lp); break; case LACP_MUX_WAITING: LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE, LACP_AGGREGATE_WAIT_TIME); la->la_pending++; break; case LACP_MUX_ATTACHED: lp->lp_state |= LACP_STATE_SYNC; lacp_disable_collecting(lp); lacp_sm_assert_ntt(lp); break; case LACP_MUX_COLLECTING: lacp_enable_collecting(lp); lacp_disable_distributing(lp); lacp_sm_assert_ntt(lp); break; case LACP_MUX_DISTRIBUTING: lacp_enable_distributing(lp); break; default: panic("%s: unknown state", __func__); } LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state)); lp->lp_mux_state = new_state; } static void lacp_sm_mux_timer(struct lacp_port *lp) { struct lacp_aggregator *la = lp->lp_aggregator; char buf[LACP_LAGIDSTR_MAX+1]; KASSERT(la->la_pending > 0, ("no pending event")); 
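/*
 * Each port sitting in LACP_MUX_WAITING holds one la_pending slot,
 * taken when lacp_set_mux() armed its wait_while timer.  The timer
 * expiring here releases that slot; only when la_pending drains to
 * zero does lacp_sm_mux() let the ports advance to LACP_MUX_ATTACHED.
 */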
LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__, lacp_format_lagid(&la->la_actor, &la->la_partner, buf, sizeof(buf)), la->la_pending, la->la_pending - 1)); la->la_pending--; } /* periodic transmit machine */ static void lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate) { if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state, LACP_STATE_TIMEOUT)) { return; } LACP_DPRINTF((lp, "partner timeout changed\n")); /* * FAST_PERIODIC -> SLOW_PERIODIC * or * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC * * let lacp_sm_ptx_tx_schedule to update timeout. */ LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); /* * if timeout has been shortened, assert NTT. */ if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) { lacp_sm_assert_ntt(lp); } } static void lacp_sm_ptx_tx_schedule(struct lacp_port *lp) { int timeout; if (!(lp->lp_state & LACP_STATE_ACTIVITY) && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) { /* * NO_PERIODIC */ LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); return; } if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) { return; } timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ? LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME; LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout); } static void lacp_sm_ptx_timer(struct lacp_port *lp) { lacp_sm_assert_ntt(lp); } static void lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du) { int timeout; /* * check LACP_DISABLED first */ if (!(lp->lp_state & LACP_STATE_AGGREGATION)) { return; } /* * check loopback condition. */ if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid, &lp->lp_actor.lip_systemid)) { return; } /* * EXPIRED, DEFAULTED, CURRENT -> CURRENT */ lacp_sm_rx_update_selected(lp, du); lacp_sm_rx_update_ntt(lp, du); lacp_sm_rx_record_pdu(lp, du); timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ? LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME; LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout); lp->lp_state &= ~LACP_STATE_EXPIRED; /* * kick transmit machine without waiting the next tick. */ lacp_sm_tx(lp); } static void lacp_sm_rx_set_expired(struct lacp_port *lp) { lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT; LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME); lp->lp_state |= LACP_STATE_EXPIRED; } static void lacp_sm_rx_timer(struct lacp_port *lp) { if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) { /* CURRENT -> EXPIRED */ LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__)); lacp_sm_rx_set_expired(lp); } else { /* EXPIRED -> DEFAULTED */ LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__)); lacp_sm_rx_update_default_selected(lp); lacp_sm_rx_record_default(lp); lp->lp_state &= ~LACP_STATE_EXPIRED; } } static void lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du) { boolean_t active; uint8_t oldpstate; char buf[LACP_STATESTR_MAX+1]; LACP_TRACE(lp); oldpstate = lp->lp_partner.lip_state; active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY) || ((lp->lp_state & LACP_STATE_ACTIVITY) && (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY)); lp->lp_partner = du->ldu_actor; if (active && ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, LACP_STATE_AGGREGATION) && !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner)) || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) { /* XXX nothing? 
*/ } else { lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; } lp->lp_state &= ~LACP_STATE_DEFAULTED; if (oldpstate != lp->lp_partner.lip_state) { LACP_DPRINTF((lp, "old pstate %s\n", lacp_format_state(oldpstate, buf, sizeof(buf)))); LACP_DPRINTF((lp, "new pstate %s\n", lacp_format_state(lp->lp_partner.lip_state, buf, sizeof(buf)))); } /* XXX Hack, still need to implement 5.4.9 para 2,3,4 */ if (lp->lp_lsc->lsc_strict_mode) lp->lp_partner.lip_state |= LACP_STATE_SYNC; lacp_sm_ptx_update_timeout(lp, oldpstate); } static void lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du) { LACP_TRACE(lp); if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) || !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) { LACP_DPRINTF((lp, "%s: assert ntt\n", __func__)); lacp_sm_assert_ntt(lp); } } static void lacp_sm_rx_record_default(struct lacp_port *lp) { uint8_t oldpstate; LACP_TRACE(lp); oldpstate = lp->lp_partner.lip_state; if (lp->lp_lsc->lsc_strict_mode) lp->lp_partner = lacp_partner_admin_strict; else lp->lp_partner = lacp_partner_admin_optimistic; lp->lp_state |= LACP_STATE_DEFAULTED; lacp_sm_ptx_update_timeout(lp, oldpstate); } static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp, const struct lacp_peerinfo *info) { LACP_TRACE(lp); if (lacp_compare_peerinfo(&lp->lp_partner, info) || !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state, LACP_STATE_AGGREGATION)) { lp->lp_selected = LACP_UNSELECTED; /* mux machine will clean up lp->lp_aggregator */ } } static void lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du) { LACP_TRACE(lp); lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor); } static void lacp_sm_rx_update_default_selected(struct lacp_port *lp) { LACP_TRACE(lp); if (lp->lp_lsc->lsc_strict_mode) lacp_sm_rx_update_selected_from_peerinfo(lp, &lacp_partner_admin_strict); else lacp_sm_rx_update_selected_from_peerinfo(lp, &lacp_partner_admin_optimistic); } /* transmit machine */ static void lacp_sm_tx(struct lacp_port *lp) { int error = 0; if (!(lp->lp_state & LACP_STATE_AGGREGATION) #if 1 || (!(lp->lp_state & LACP_STATE_ACTIVITY) && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) #endif ) { lp->lp_flags &= ~LACP_PORT_NTT; } if (!(lp->lp_flags & LACP_PORT_NTT)) { return; } /* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */ if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent, (3 / LACP_FAST_PERIODIC_TIME)) == 0) { LACP_DPRINTF((lp, "rate limited pdu\n")); return; } if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) { error = lacp_xmit_lacpdu(lp); } else { LACP_TPRINTF((lp, "Dropping TX PDU\n")); } if (error == 0) { lp->lp_flags &= ~LACP_PORT_NTT; } else { LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n", error)); } } static void lacp_sm_assert_ntt(struct lacp_port *lp) { lp->lp_flags |= LACP_PORT_NTT; } static void lacp_run_timers(struct lacp_port *lp) { int i; for (i = 0; i < LACP_NTIMER; i++) { KASSERT(lp->lp_timer[i] >= 0, ("invalid timer value %d", lp->lp_timer[i])); if (lp->lp_timer[i] == 0) { continue; } else if (--lp->lp_timer[i] <= 0) { if (lacp_timer_funcs[i]) { (*lacp_timer_funcs[i])(lp); } } } } int lacp_marker_input(struct lacp_port *lp, struct mbuf *m) { struct lacp_softc *lsc = lp->lp_lsc; struct lagg_port *lgp = lp->lp_lagg; struct lacp_port *lp2; struct markerdu *mdu; int error = 0; int pending = 0; if (m->m_pkthdr.len != sizeof(*mdu)) { goto bad; } if ((m->m_flags & M_MCAST) == 0) { goto bad; } 
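/*
 * Marker PDUs serve as end-of-queue probes: lacp_suppress_distributing()
 * sends one down every port with LACP_PORT_MARK set, and the peer echoes
 * the info TLV back as a response TLV carrying the same transaction id.
 * The frame shape is validated first; the echo is matched against
 * lp_marker below, and distribution resumes once no port is still marked.
 */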
if (m->m_len < sizeof(*mdu)) { m = m_pullup(m, sizeof(*mdu)); if (m == NULL) { return (ENOMEM); } } mdu = mtod(m, struct markerdu *); if (memcmp(&mdu->mdu_eh.ether_dhost, &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) { goto bad; } if (mdu->mdu_sph.sph_version != 1) { goto bad; } switch (mdu->mdu_tlv.tlv_type) { case MARKER_TYPE_INFO: if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv, marker_info_tlv_template, TRUE)) { goto bad; } mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE; memcpy(&mdu->mdu_eh.ether_dhost, &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN); memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN); error = lagg_enqueue(lp->lp_ifp, m); break; case MARKER_TYPE_RESPONSE: if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv, marker_response_tlv_template, TRUE)) { goto bad; } LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n", ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":", ntohl(mdu->mdu_info.mi_rq_xid))); /* Verify that it is the last marker we sent out */ if (memcmp(&mdu->mdu_info, &lp->lp_marker, sizeof(struct lacp_markerinfo))) goto bad; LACP_LOCK(lsc); lp->lp_flags &= ~LACP_PORT_MARK; if (lsc->lsc_suppress_distributing) { /* Check if any ports are waiting for a response */ LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) { if (lp2->lp_flags & LACP_PORT_MARK) { pending = 1; break; } } if (pending == 0) { /* All interface queues are clear */ LACP_DPRINTF((NULL, "queue flush complete\n")); lsc->lsc_suppress_distributing = FALSE; } } LACP_UNLOCK(lsc); m_freem(m); break; default: goto bad; } return (error); bad: LACP_DPRINTF((lp, "bad marker frame\n")); m_freem(m); return (EINVAL); } static int tlv_check(const void *p, size_t size, const struct tlvhdr *tlv, const struct tlv_template *tmpl, boolean_t check_type) { while (/* CONSTCOND */ 1) { if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) { return (EINVAL); } if ((check_type && tlv->tlv_type != tmpl->tmpl_type) || tlv->tlv_length != tmpl->tmpl_length) { return (EINVAL); } if (tmpl->tmpl_type == 0) { break; } tlv = (const struct tlvhdr *) ((const char *)tlv + tlv->tlv_length); tmpl++; } return (0); } /* Debugging */ const char * lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen) { snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X", (int)mac[0], (int)mac[1], (int)mac[2], (int)mac[3], (int)mac[4], (int)mac[5]); return (buf); } const char * lacp_format_systemid(const struct lacp_systemid *sysid, char *buf, size_t buflen) { char macbuf[LACP_MACSTR_MAX+1]; snprintf(buf, buflen, "%04X,%s", ntohs(sysid->lsi_prio), lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf))); return (buf); } const char * lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen) { snprintf(buf, buflen, "%04X,%04X", ntohs(portid->lpi_prio), ntohs(portid->lpi_portno)); return (buf); } const char * lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen) { char sysid[LACP_SYSTEMIDSTR_MAX+1]; char portid[LACP_PORTIDSTR_MAX+1]; snprintf(buf, buflen, "(%s,%04X,%s)", lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)), ntohs(peer->lip_key), lacp_format_portid(&peer->lip_portid, portid, sizeof(portid))); return (buf); } const char * lacp_format_lagid(const struct lacp_peerinfo *a, const struct lacp_peerinfo *b, char *buf, size_t buflen) { char astr[LACP_PARTNERSTR_MAX+1]; char bstr[LACP_PARTNERSTR_MAX+1]; #if 0 /* * there's a convention to display small numbered peer * in the left.
*/ if (lacp_compare_peerinfo(a, b) > 0) { const struct lacp_peerinfo *t; t = a; a = b; b = t; } #endif snprintf(buf, buflen, "[%s,%s]", lacp_format_partner(a, astr, sizeof(astr)), lacp_format_partner(b, bstr, sizeof(bstr))); return (buf); } const char * lacp_format_lagid_aggregator(const struct lacp_aggregator *la, char *buf, size_t buflen) { if (la == NULL) { return ("(none)"); } return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen)); } const char * lacp_format_state(uint8_t state, char *buf, size_t buflen) { snprintf(buf, buflen, "%b", state, LACP_STATE_BITS); return (buf); } static void lacp_dump_lacpdu(const struct lacpdu *du) { char buf[LACP_PARTNERSTR_MAX+1]; char buf2[LACP_STATESTR_MAX+1]; printf("actor=%s\n", lacp_format_partner(&du->ldu_actor, buf, sizeof(buf))); printf("actor.state=%s\n", lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2))); printf("partner=%s\n", lacp_format_partner(&du->ldu_partner, buf, sizeof(buf))); printf("partner.state=%s\n", lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2))); printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay)); } static void lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...) { va_list va; if (lp) { printf("%s: ", lp->lp_ifp->if_xname); } va_start(va, fmt); vprintf(fmt, va); va_end(va); } Index: projects/clang400-import/sys/net/if.c =================================================================== --- projects/clang400-import/sys/net/if.c (revision 312719) +++ projects/clang400-import/sys/net/if.c (revision 312720) @@ -1,4166 +1,4169 @@ /*- * Copyright (c) 1980, 1986, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)if.c 8.5 (Berkeley) 1/9/95 * $FreeBSD$ */ #include "opt_compat.h" #include "opt_inet6.h" #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(INET) || defined(INET6) #include #include #include #include #include #ifdef INET #include #endif /* INET */ #ifdef INET6 #include #include #endif /* INET6 */ #endif /* INET || INET6 */ #include #ifdef COMPAT_FREEBSD32 #include #include #endif SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); SYSCTL_INT(_net_link, OID_AUTO, ifqmaxlen, CTLFLAG_RDTUN, &ifqmaxlen, 0, "max send queue size"); /* Log link state change events */ static int log_link_state_change = 1; SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW, &log_link_state_change, 0, "log interface link state change events"); /* Log promiscuous mode change events */ static int log_promisc_mode_change = 1; SYSCTL_INT(_net_link, OID_AUTO, log_promisc_mode_change, CTLFLAG_RDTUN, &log_promisc_mode_change, 1, "log promiscuous mode change events"); /* Interface description */ static unsigned int ifdescr_maxlen = 1024; SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW, &ifdescr_maxlen, 0, "administrative maximum length for interface description"); static MALLOC_DEFINE(M_IFDESCR, "ifdescr", "ifnet descriptions"); /* global sx for non-critical path ifdescr */ static struct sx ifdescr_sx; SX_SYSINIT(ifdescr_sx, &ifdescr_sx, "ifnet descr"); void (*bridge_linkstate_p)(struct ifnet *ifp); void (*ng_ether_link_state_p)(struct ifnet *ifp, int state); void (*lagg_linkstate_p)(struct ifnet *ifp, int state); /* These are external hooks for CARP. */ void (*carp_linkstate_p)(struct ifnet *ifp); void (*carp_demote_adj_p)(int, char *); int (*carp_master_p)(struct ifaddr *); #if defined(INET) || defined(INET6) int (*carp_forus_p)(struct ifnet *ifp, u_char *dhost); int (*carp_output_p)(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa); int (*carp_ioctl_p)(struct ifreq *, u_long, struct thread *); int (*carp_attach_p)(struct ifaddr *, int); void (*carp_detach_p)(struct ifaddr *); #endif #ifdef INET int (*carp_iamatch_p)(struct ifaddr *, uint8_t **); #endif #ifdef INET6 struct ifaddr *(*carp_iamatch6_p)(struct ifnet *ifp, struct in6_addr *taddr6); caddr_t (*carp_macmatch6_p)(struct ifnet *ifp, struct mbuf *m, const struct in6_addr *taddr); #endif struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL; /* * XXX: Style; these should be sorted alphabetically, and unprototyped * static functions should be prototyped. Currently they are sorted by * declaration order. 
*/ static void if_attachdomain(void *); static void if_attachdomain1(struct ifnet *); static int ifconf(u_long, caddr_t); static void if_freemulti(struct ifmultiaddr *); static void if_grow(void); static void if_input_default(struct ifnet *, struct mbuf *); static int if_requestencap_default(struct ifnet *, struct if_encap_req *); static void if_route(struct ifnet *, int flag, int fam); static int if_setflag(struct ifnet *, int, int, int *, int); static int if_transmit(struct ifnet *ifp, struct mbuf *m); static void if_unroute(struct ifnet *, int flag, int fam); static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *); static int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *); static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int); static void do_link_state_change(void *, int); static int if_getgroup(struct ifgroupreq *, struct ifnet *); static int if_getgroupmembers(struct ifgroupreq *); static void if_delgroups(struct ifnet *); static void if_attach_internal(struct ifnet *, int, struct if_clone *); static int if_detach_internal(struct ifnet *, int, struct if_clone **); #ifdef VIMAGE static void if_vmove(struct ifnet *, struct vnet *); #endif #ifdef INET6 /* * XXX: declare here to avoid including many inet6 related files.. * should be more generalized? */ extern void nd6_setmtu(struct ifnet *); #endif /* ipsec helper hooks */ VNET_DEFINE(struct hhook_head *, ipsec_hhh_in[HHOOK_IPSEC_COUNT]); VNET_DEFINE(struct hhook_head *, ipsec_hhh_out[HHOOK_IPSEC_COUNT]); VNET_DEFINE(int, if_index); int ifqmaxlen = IFQ_MAXLEN; VNET_DEFINE(struct ifnethead, ifnet); /* depend on static init XXX */ VNET_DEFINE(struct ifgrouphead, ifg_head); static VNET_DEFINE(int, if_indexlim) = 8; /* Table of ifnet by index. */ VNET_DEFINE(struct ifnet **, ifindex_table); #define V_if_indexlim VNET(if_indexlim) #define V_ifindex_table VNET(ifindex_table) /* * The global network interface list (V_ifnet) and related state (such as * if_index, if_indexlim, and ifindex_table) are protected by an sxlock and * an rwlock. Either may be acquired shared to stabilize the list, but both * must be acquired writable to modify the list. This model allows us * both to stabilize the interface list during interrupt thread processing and * to stabilize it over long-running ioctls, without introducing priority * inversions and deadlocks. */ struct rwlock ifnet_rwlock; RW_SYSINIT_FLAGS(ifnet_rw, &ifnet_rwlock, "ifnet_rw", RW_RECURSE); struct sx ifnet_sxlock; SX_SYSINIT_FLAGS(ifnet_sx, &ifnet_sxlock, "ifnet_sx", SX_RECURSE); /* * The allocation of network interfaces is a rather non-atomic affair; we * need to select an index before we are ready to expose the interface for * use, so will use this pointer value to indicate reservation.
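 * * A sketch of the resulting pattern, as used by if_alloc() below (paraphrased, not literal source): * * IFNET_WLOCK(); * idx = ifindex_alloc(); * ifnet_setbyindex_locked(idx, IFNET_HOLD); (reserve the slot) * IFNET_WUNLOCK(); * ... initialize the ifnet ... * ifnet_setbyindex(idx, ifp); (publish it) * * Readers treat an IFNET_HOLD slot as empty; see ifnet_byindex_locked() below.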
*/ #define IFNET_HOLD (void *)(uintptr_t)(-1) static if_com_alloc_t *if_com_alloc[256]; static if_com_free_t *if_com_free[256]; static MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals"); MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); struct ifnet * ifnet_byindex_locked(u_short idx) { if (idx > V_if_index) return (NULL); if (V_ifindex_table[idx] == IFNET_HOLD) return (NULL); return (V_ifindex_table[idx]); } struct ifnet * ifnet_byindex(u_short idx) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); ifp = ifnet_byindex_locked(idx); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } struct ifnet * ifnet_byindex_ref(u_short idx) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); ifp = ifnet_byindex_locked(idx); if (ifp == NULL || (ifp->if_flags & IFF_DYING)) { IFNET_RUNLOCK_NOSLEEP(); return (NULL); } if_ref(ifp); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } /* * Allocate an ifindex array entry and return the allocated index; the * table is grown as needed, so the allocation cannot fail. */ static u_short ifindex_alloc(void) { u_short idx; IFNET_WLOCK_ASSERT(); retry: /* * Try to find an empty slot below V_if_index. If we fail, take the * next slot. */ for (idx = 1; idx <= V_if_index; idx++) { if (V_ifindex_table[idx] == NULL) break; } /* Catch if_index overflow. */ if (idx >= V_if_indexlim) { if_grow(); goto retry; } if (idx > V_if_index) V_if_index = idx; return (idx); } static void ifindex_free_locked(u_short idx) { IFNET_WLOCK_ASSERT(); V_ifindex_table[idx] = NULL; while (V_if_index > 0 && V_ifindex_table[V_if_index] == NULL) V_if_index--; } static void ifindex_free(u_short idx) { IFNET_WLOCK(); ifindex_free_locked(idx); IFNET_WUNLOCK(); } static void ifnet_setbyindex_locked(u_short idx, struct ifnet *ifp) { IFNET_WLOCK_ASSERT(); V_ifindex_table[idx] = ifp; } static void ifnet_setbyindex(u_short idx, struct ifnet *ifp) { IFNET_WLOCK(); ifnet_setbyindex_locked(idx, ifp); IFNET_WUNLOCK(); } struct ifaddr * ifaddr_byindex(u_short idx) { struct ifnet *ifp; struct ifaddr *ifa = NULL; IFNET_RLOCK_NOSLEEP(); ifp = ifnet_byindex_locked(idx); if (ifp != NULL && (ifa = ifp->if_addr) != NULL) ifa_ref(ifa); IFNET_RUNLOCK_NOSLEEP(); return (ifa); } /* * Network interface utility routines. * * Routines with ifa_ifwith* names take sockaddr *'s as * parameters. */ static void vnet_if_init(const void *unused __unused) { TAILQ_INIT(&V_ifnet); TAILQ_INIT(&V_ifg_head); IFNET_WLOCK(); if_grow(); /* create initial table */ IFNET_WUNLOCK(); vnet_if_clone_init(); } VNET_SYSINIT(vnet_if_init, SI_SUB_INIT_IF, SI_ORDER_SECOND, vnet_if_init, NULL); #ifdef VIMAGE static void vnet_if_uninit(const void *unused __unused) { VNET_ASSERT(TAILQ_EMPTY(&V_ifnet), ("%s:%d tailq &V_ifnet=%p " "not empty", __func__, __LINE__, &V_ifnet)); VNET_ASSERT(TAILQ_EMPTY(&V_ifg_head), ("%s:%d tailq &V_ifg_head=%p " "not empty", __func__, __LINE__, &V_ifg_head)); free((caddr_t)V_ifindex_table, M_IFNET); } VNET_SYSUNINIT(vnet_if_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST, vnet_if_uninit, NULL); static void vnet_if_return(const void *unused __unused) { struct ifnet *ifp, *nifp; /* Return all inherited interfaces to their parent vnets.
*/ TAILQ_FOREACH_SAFE(ifp, &V_ifnet, if_link, nifp) { if (ifp->if_home_vnet != ifp->if_vnet) if_vmove(ifp, ifp->if_home_vnet); } } VNET_SYSUNINIT(vnet_if_return, SI_SUB_VNET_DONE, SI_ORDER_ANY, vnet_if_return, NULL); #endif static void if_grow(void) { int oldlim; u_int n; struct ifnet **e; IFNET_WLOCK_ASSERT(); oldlim = V_if_indexlim; IFNET_WUNLOCK(); n = (oldlim << 1) * sizeof(*e); e = malloc(n, M_IFNET, M_WAITOK | M_ZERO); IFNET_WLOCK(); if (V_if_indexlim != oldlim) { free(e, M_IFNET); return; } if (V_ifindex_table != NULL) { memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2); free((caddr_t)V_ifindex_table, M_IFNET); } V_if_indexlim <<= 1; V_ifindex_table = e; } /* * Allocate a struct ifnet and an index for an interface. A layer 2 * common structure will also be allocated if an allocation routine is * registered for the passed type. */ struct ifnet * if_alloc(u_char type) { struct ifnet *ifp; u_short idx; ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO); IFNET_WLOCK(); idx = ifindex_alloc(); ifnet_setbyindex_locked(idx, IFNET_HOLD); IFNET_WUNLOCK(); ifp->if_index = idx; ifp->if_type = type; ifp->if_alloctype = type; #ifdef VIMAGE ifp->if_vnet = curvnet; #endif if (if_com_alloc[type] != NULL) { ifp->if_l2com = if_com_alloc[type](type, ifp); if (ifp->if_l2com == NULL) { free(ifp, M_IFNET); ifindex_free(idx); return (NULL); } } IF_ADDR_LOCK_INIT(ifp); TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp); ifp->if_afdata_initialized = 0; IF_AFDATA_LOCK_INIT(ifp); TAILQ_INIT(&ifp->if_addrhead); TAILQ_INIT(&ifp->if_multiaddrs); TAILQ_INIT(&ifp->if_groups); #ifdef MAC mac_ifnet_init(ifp); #endif ifq_init(&ifp->if_snd, ifp); refcount_init(&ifp->if_refcount, 1); /* Index reference. */ for (int i = 0; i < IFCOUNTERS; i++) ifp->if_counters[i] = counter_u64_alloc(M_WAITOK); ifp->if_get_counter = if_get_counter_default; ifnet_setbyindex(ifp->if_index, ifp); return (ifp); } /* * Do the actual work of freeing a struct ifnet, and layer 2 common * structure. This call is made when the last reference to an * interface is released. */ static void if_free_internal(struct ifnet *ifp) { KASSERT((ifp->if_flags & IFF_DYING), ("if_free_internal: interface not dying")); if (if_com_free[ifp->if_alloctype] != NULL) if_com_free[ifp->if_alloctype](ifp->if_l2com, ifp->if_alloctype); #ifdef MAC mac_ifnet_destroy(ifp); #endif /* MAC */ if (ifp->if_description != NULL) free(ifp->if_description, M_IFDESCR); IF_AFDATA_DESTROY(ifp); IF_ADDR_LOCK_DESTROY(ifp); ifq_delete(&ifp->if_snd); for (int i = 0; i < IFCOUNTERS; i++) counter_u64_free(ifp->if_counters[i]); free(ifp, M_IFNET); } /* * Deregister an interface and free the associated storage. */ void if_free(struct ifnet *ifp) { ifp->if_flags |= IFF_DYING; /* XXX: Locking */ CURVNET_SET_QUIET(ifp->if_vnet); IFNET_WLOCK(); KASSERT(ifp == ifnet_byindex_locked(ifp->if_index), ("%s: freeing unallocated ifnet", ifp->if_xname)); ifindex_free_locked(ifp->if_index); IFNET_WUNLOCK(); if (refcount_release(&ifp->if_refcount)) if_free_internal(ifp); CURVNET_RESTORE(); } /* * Interfaces to keep an ifnet type-stable despite the possibility of the * driver calling if_free(). If there are additional references, we defer * freeing the underlying data structure. */ void if_ref(struct ifnet *ifp) { /* We don't assert the ifnet list lock here, but arguably should. 
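 * * Illustrative usage (not from the original source): a caller that has looked up an ifnet and needs it to survive a sleep point does * * if_ref(ifp); * ... use ifp without holding the list lock ... * if_rele(ifp); * * and if_rele() below frees the ifnet once the last reference is dropped.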
*/ refcount_acquire(&ifp->if_refcount); } void if_rele(struct ifnet *ifp) { if (!refcount_release(&ifp->if_refcount)) return; if_free_internal(ifp); } void ifq_init(struct ifaltq *ifq, struct ifnet *ifp) { mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF); if (ifq->ifq_maxlen == 0) ifq->ifq_maxlen = ifqmaxlen; ifq->altq_type = 0; ifq->altq_disc = NULL; ifq->altq_flags &= ALTQF_CANTCHANGE; ifq->altq_tbr = NULL; ifq->altq_ifp = ifp; } void ifq_delete(struct ifaltq *ifq) { mtx_destroy(&ifq->ifq_mtx); } /* * Perform generic interface initialization tasks and attach the interface * to the list of "active" interfaces. If vmove flag is set on entry * to if_attach_internal(), perform only a limited subset of initialization * tasks, given that we are moving an ifnet which has already been fully * initialized from one vnet to another. * * Note that if_detach_internal() removes group membership unconditionally * even when vmove flag is set, and if_attach_internal() adds only IFG_ALL. * Thus, when if_vmove() is applied to a cloned interface, group membership * is lost, even though a cloned interface always joins a group whose name is * ifc->ifc_name. To restore that membership after if_detach_internal() and * if_attach_internal(), the cloner should be passed to * if_attach_internal() via ifc. If it is non-NULL, if_attach_internal() * attempts to join a group whose name is ifc->ifc_name. * * XXX: * - The decision to return void and thus require this function to * succeed is questionable. * - We should probably do more sanity checking. For instance we don't * do anything to ensure if_xname is unique or non-empty. */ void if_attach(struct ifnet *ifp) { if_attach_internal(ifp, 0, NULL); } /* * Compute the least common TSO limit. */ void if_hw_tsomax_common(if_t ifp, struct ifnet_hw_tsomax *pmax) { /* * 1) If there is no limit currently, take the limit from * the network adapter. * * 2) If the network adapter has a limit below the current * limit, apply it. */ if (pmax->tsomaxbytes == 0 || (ifp->if_hw_tsomax != 0 && ifp->if_hw_tsomax < pmax->tsomaxbytes)) { pmax->tsomaxbytes = ifp->if_hw_tsomax; } if (pmax->tsomaxsegcount == 0 || (ifp->if_hw_tsomaxsegcount != 0 && ifp->if_hw_tsomaxsegcount < pmax->tsomaxsegcount)) { pmax->tsomaxsegcount = ifp->if_hw_tsomaxsegcount; } if (pmax->tsomaxsegsize == 0 || (ifp->if_hw_tsomaxsegsize != 0 && ifp->if_hw_tsomaxsegsize < pmax->tsomaxsegsize)) { pmax->tsomaxsegsize = ifp->if_hw_tsomaxsegsize; } } /* * Update TSO limit of a network adapter. * * Returns zero if there was no change, else non-zero. */ int if_hw_tsomax_update(if_t ifp, struct ifnet_hw_tsomax *pmax) { int retval = 0; if (ifp->if_hw_tsomax != pmax->tsomaxbytes) { ifp->if_hw_tsomax = pmax->tsomaxbytes; retval++; } if (ifp->if_hw_tsomaxsegsize != pmax->tsomaxsegsize) { ifp->if_hw_tsomaxsegsize = pmax->tsomaxsegsize; retval++; } if (ifp->if_hw_tsomaxsegcount != pmax->tsomaxsegcount) { ifp->if_hw_tsomaxsegcount = pmax->tsomaxsegcount; retval++; } return (retval); } static void if_attach_internal(struct ifnet *ifp, int vmove, struct if_clone *ifc) { unsigned socksize, ifasize; int namelen, masklen; struct sockaddr_dl *sdl; struct ifaddr *ifa; if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index)) panic ("%s: BUG: if_attach called without if_alloc'd input()\n", ifp->if_xname); #ifdef VIMAGE ifp->if_vnet = curvnet; if (ifp->if_home_vnet == NULL) ifp->if_home_vnet = curvnet; #endif if_addgroup(ifp, IFG_ALL); /* Restore group membership for cloned interfaces.
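 * (The cloner ifc was captured by if_detach_internal() via if_clone_findifc(), which is what allows the ifc->ifc_name group to be re-joined here; see the comment above if_attach().)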
*/ if (vmove && ifc != NULL) if_clone_addgroup(ifp, ifc); getmicrotime(&ifp->if_lastchange); ifp->if_epoch = time_uptime; KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) || (ifp->if_transmit != NULL && ifp->if_qflush != NULL), ("transmit and qflush must both either be set or both be NULL")); if (ifp->if_transmit == NULL) { ifp->if_transmit = if_transmit; ifp->if_qflush = if_qflush; } if (ifp->if_input == NULL) ifp->if_input = if_input_default; if (ifp->if_requestencap == NULL) ifp->if_requestencap = if_requestencap_default; if (!vmove) { #ifdef MAC mac_ifnet_create(ifp); #endif /* * Create a Link Level name for this device. */ namelen = strlen(ifp->if_xname); /* * Always save enough space for any possible name so we * can do a rename in place later. */ masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ; socksize = masklen + ifp->if_addrlen; if (socksize < sizeof(*sdl)) socksize = sizeof(*sdl); socksize = roundup2(socksize, sizeof(long)); ifasize = sizeof(*ifa) + 2 * socksize; ifa = ifa_alloc(ifasize, M_WAITOK); sdl = (struct sockaddr_dl *)(ifa + 1); sdl->sdl_len = socksize; sdl->sdl_family = AF_LINK; bcopy(ifp->if_xname, sdl->sdl_data, namelen); sdl->sdl_nlen = namelen; sdl->sdl_index = ifp->if_index; sdl->sdl_type = ifp->if_type; ifp->if_addr = ifa; ifa->ifa_ifp = ifp; ifa->ifa_rtrequest = link_rtrequest; ifa->ifa_addr = (struct sockaddr *)sdl; sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); ifa->ifa_netmask = (struct sockaddr *)sdl; sdl->sdl_len = masklen; while (namelen != 0) sdl->sdl_data[--namelen] = 0xff; TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link); /* Reliably crash if used uninitialized. */ ifp->if_broadcastaddr = NULL; #if defined(INET) || defined(INET6) /* Use defaults for TSO, if nothing is set */ if (ifp->if_hw_tsomax == 0 && ifp->if_hw_tsomaxsegcount == 0 && ifp->if_hw_tsomaxsegsize == 0) { /* * The TSO defaults need to be such that an * NFS mbuf list of 35 mbufs totalling just * below 64K works and that a chain of mbufs * can be defragged into at most 32 segments: */ ifp->if_hw_tsomax = min(IP_MAXPACKET, (32 * MCLBYTES) - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); ifp->if_hw_tsomaxsegcount = 35; ifp->if_hw_tsomaxsegsize = 2048; /* 2K */ /* XXX some drivers set IFCAP_TSO after ethernet attach */ if (ifp->if_capabilities & IFCAP_TSO) { if_printf(ifp, "Using defaults for TSO: %u/%u/%u\n", ifp->if_hw_tsomax, ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize); } } #endif } #ifdef VIMAGE else { /* * Update the interface index in the link layer address * of the interface. */ for (ifa = ifp->if_addr; ifa != NULL; ifa = TAILQ_NEXT(ifa, ifa_link)) { if (ifa->ifa_addr->sa_family == AF_LINK) { sdl = (struct sockaddr_dl *)ifa->ifa_addr; sdl->sdl_index = ifp->if_index; } } } #endif IFNET_WLOCK(); TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link); #ifdef VIMAGE curvnet->vnet_ifcnt++; #endif IFNET_WUNLOCK(); if (domain_init_status >= 2) if_attachdomain1(ifp); EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); /* Announce the interface. */ rt_ifannouncemsg(ifp, IFAN_ARRIVAL); } static void if_attachdomain(void *dummy) { struct ifnet *ifp; TAILQ_FOREACH(ifp, &V_ifnet, if_link) if_attachdomain1(ifp); } SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND, if_attachdomain, NULL); static void if_attachdomain1(struct ifnet *ifp) { struct domain *dp; /* * Since dp->dom_ifattach calls malloc() with M_WAITOK, we * cannot lock ifp->if_afdata initialization entirely.
*/ IF_AFDATA_LOCK(ifp); if (ifp->if_afdata_initialized >= domain_init_status) { IF_AFDATA_UNLOCK(ifp); log(LOG_WARNING, "%s called more than once on %s\n", __func__, ifp->if_xname); return; } ifp->if_afdata_initialized = domain_init_status; IF_AFDATA_UNLOCK(ifp); /* address family dependent data region */ bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); for (dp = domains; dp; dp = dp->dom_next) { if (dp->dom_ifattach) ifp->if_afdata[dp->dom_family] = (*dp->dom_ifattach)(ifp); } } /* * Remove any unicast or broadcast network addresses from an interface. */ void if_purgeaddrs(struct ifnet *ifp) { struct ifaddr *ifa, *next; /* XXX cannot hold IF_ADDR_WLOCK over called functions. */ TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) { if (ifa->ifa_addr->sa_family == AF_LINK) continue; #ifdef INET /* XXX: Ugly!! ad hoc just for INET */ if (ifa->ifa_addr->sa_family == AF_INET) { struct ifaliasreq ifr; bzero(&ifr, sizeof(ifr)); ifr.ifra_addr = *ifa->ifa_addr; if (ifa->ifa_dstaddr) ifr.ifra_broadaddr = *ifa->ifa_dstaddr; if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp, NULL) == 0) continue; } #endif /* INET */ #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) { in6_purgeaddr(ifa); /* ifp_addrhead is already updated */ continue; } #endif /* INET6 */ IF_ADDR_WLOCK(ifp); TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); IF_ADDR_WUNLOCK(ifp); ifa_free(ifa); } } /* * Remove any multicast network addresses from an interface when an ifnet * is going away. */ static void if_purgemaddrs(struct ifnet *ifp) { struct ifmultiaddr *ifma; struct ifmultiaddr *next; IF_ADDR_WLOCK(ifp); TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) if_delmulti_locked(ifp, ifma, 1); IF_ADDR_WUNLOCK(ifp); } /* * Detach an interface, removing it from the list of "active" interfaces. * If vmove flag is set on entry to if_detach_internal(), perform only a * limited subset of cleanup tasks, given that we are moving an ifnet from * one vnet to another, where it must be fully operational. * * XXXRW: There are some significant questions about event ordering, and * how to prevent things from starting to use the interface during detach. */ void if_detach(struct ifnet *ifp) { CURVNET_SET_QUIET(ifp->if_vnet); if_detach_internal(ifp, 0, NULL); CURVNET_RESTORE(); } /* * The vmove flag, if set, indicates that we are called from a callpath * that is moving an interface to a different vnet instance. * * The shutdown flag, if set, indicates that we are called in the * process of shutting down a vnet instance. Currently only the * vnet_if_return SYSUNINIT function sets it. Note: we can be called * on a vnet instance shutdown without this flag being set, e.g., when * the cloned interfaces are destroyed as the first thing during teardown. */ static int if_detach_internal(struct ifnet *ifp, int vmove, struct if_clone **ifcp) { struct ifaddr *ifa; int i; struct domain *dp; struct ifnet *iter; int found = 0; #ifdef VIMAGE int shutdown; shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET && ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0; #endif IFNET_WLOCK(); TAILQ_FOREACH(iter, &V_ifnet, if_link) if (iter == ifp) { TAILQ_REMOVE(&V_ifnet, ifp, if_link); found = 1; break; } IFNET_WUNLOCK(); if (!found) { /* * While we would want to panic here, we cannot * guarantee that the interface is indeed still on * the list given we don't hold locks all the way. */ return (ENOENT); #if 0 if (vmove) panic("%s: ifp=%p not on the ifnet tailq %p", __func__, ifp, &V_ifnet); else return; /* XXX this should panic as well?
*/ #endif } /* * At this point we know the interface was still on the ifnet list * and we removed it, so we are in a stable state. */ #ifdef VIMAGE curvnet->vnet_ifcnt--; #endif /* * In any case (destroy or vmove) detach us from the groups * and remove/wait for pending events on the taskq. * XXX-BZ in theory an interface could still enqueue a taskq change? */ if_delgroups(ifp); taskqueue_drain(taskqueue_swi, &ifp->if_linktask); /* * Check if this is a cloned interface or not. Must do this even if * shutting down, as an if_vmove_reclaim() would move the ifp and * the if_clone_addgroup() would otherwise have a corrupted string * from a gibberish pointer. */ if (vmove && ifcp != NULL) *ifcp = if_clone_findifc(ifp); if_down(ifp); #ifdef VIMAGE /* * On VNET shutdown abort here as the stack teardown will do all * the work top-down for us. */ if (shutdown) { /* * In case of a vmove we are done here without error. * If we would signal an error it would lead to the same * abort as if we did not find the ifnet anymore. * if_detach() calls us in void context and does not care * about an early abort notification, so life is splendid :) */ goto finish_vnet_shutdown; } #endif /* * At this point we are not tearing down a VNET and are either * going to destroy or vmove the interface and have to cleanup * accordingly. */ /* * Remove routes and flush queues. */ #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) altq_disable(&ifp->if_snd); if (ALTQ_IS_ATTACHED(&ifp->if_snd)) altq_detach(&ifp->if_snd); #endif if_purgeaddrs(ifp); #ifdef INET in_ifdetach(ifp); #endif #ifdef INET6 /* * Remove all IPv6 kernel structs related to ifp. This should be done * before removing routing entries below, since IPv6 interface direct * routes are expected to be removed by the IPv6-specific kernel API. * Otherwise, the kernel will detect some inconsistency and bark at it. */ in6_ifdetach(ifp); #endif if_purgemaddrs(ifp); /* Announce that the interface is gone. */ rt_ifannouncemsg(ifp, IFAN_DEPARTURE); EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); if (!vmove) { /* * Prevent further calls into the device driver via ifnet. */ if_dead(ifp); /* * Remove link ifaddr pointer and maybe decrement if_index. * Clean up all addresses. */ ifp->if_addr = NULL; /* We can now free link ifaddr. */ IF_ADDR_WLOCK(ifp); if (!TAILQ_EMPTY(&ifp->if_addrhead)) { ifa = TAILQ_FIRST(&ifp->if_addrhead); TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); IF_ADDR_WUNLOCK(ifp); ifa_free(ifa); } else IF_ADDR_WUNLOCK(ifp); } rt_flushifroutes(ifp); #ifdef VIMAGE finish_vnet_shutdown: #endif /* * We cannot hold the lock over dom_ifdetach calls as they might * sleep, for example trying to drain a callout, thus open up the * theoretical race with re-attaching. */ IF_AFDATA_LOCK(ifp); i = ifp->if_afdata_initialized; ifp->if_afdata_initialized = 0; IF_AFDATA_UNLOCK(ifp); for (dp = domains; i > 0 && dp; dp = dp->dom_next) { if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) { (*dp->dom_ifdetach)(ifp, ifp->if_afdata[dp->dom_family]); ifp->if_afdata[dp->dom_family] = NULL; } } return (0); } #ifdef VIMAGE /* * if_vmove() performs a limited version of if_detach() in the current * vnet and if_attach()es the ifnet to the vnet specified as the 2nd arg. * An attempt is made to shrink if_index in the current vnet, find an * unused if_index in the target vnet (calling if_grow() if necessary), * and finally find an unused if_xname for the target vnet.
*/ static void if_vmove(struct ifnet *ifp, struct vnet *new_vnet) { struct if_clone *ifc; u_int bif_dlt, bif_hdrlen; int rc; /* * if_detach_internal() will call the eventhandler to notify * interface departure. That will detach if_bpf. We need to * save the dlt and hdrlen so we can re-attach it later. */ bpf_get_bp_params(ifp->if_bpf, &bif_dlt, &bif_hdrlen); /* * Detach from current vnet, but preserve LLADDR info, do not * mark as dead etc. so that the ifnet can be reattached later. * If we cannot find it, we lost the race to someone else. */ rc = if_detach_internal(ifp, 1, &ifc); if (rc != 0) return; /* * Unlink the ifnet from ifindex_table[] in current vnet, and shrink * the if_index for that vnet if possible. * * NOTE: IFNET_WLOCK/IFNET_WUNLOCK() are assumed to be unvirtualized, * or we'd lock on one vnet and unlock on another. */ IFNET_WLOCK(); ifindex_free_locked(ifp->if_index); IFNET_WUNLOCK(); /* * Perform interface-specific reassignment tasks, if provided by * the driver. */ if (ifp->if_reassign != NULL) ifp->if_reassign(ifp, new_vnet, NULL); /* * Switch to the context of the target vnet. */ CURVNET_SET_QUIET(new_vnet); IFNET_WLOCK(); ifp->if_index = ifindex_alloc(); ifnet_setbyindex_locked(ifp->if_index, ifp); IFNET_WUNLOCK(); if_attach_internal(ifp, 1, ifc); if (ifp->if_bpf == NULL) bpfattach(ifp, bif_dlt, bif_hdrlen); CURVNET_RESTORE(); } /* * Move an ifnet to or from another child prison/vnet, specified by the jail id. */ static int if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid) { struct prison *pr; struct ifnet *difp; int shutdown; /* Try to find the prison within our visibility. */ sx_slock(&allprison_lock); pr = prison_find_child(td->td_ucred->cr_prison, jid); sx_sunlock(&allprison_lock); if (pr == NULL) return (ENXIO); prison_hold_locked(pr); mtx_unlock(&pr->pr_mtx); /* Do not try to move the iface from and to the same prison. */ if (pr->pr_vnet == ifp->if_vnet) { prison_free(pr); return (EEXIST); } /* Make sure the named iface does not exist in the destination prison/vnet. */ /* XXX Lock interfaces to avoid races. */ CURVNET_SET_QUIET(pr->pr_vnet); difp = ifunit(ifname); if (difp != NULL) { CURVNET_RESTORE(); prison_free(pr); return (EEXIST); } /* Make sure the VNET is stable. */ shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET && ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0; if (shutdown) { CURVNET_RESTORE(); prison_free(pr); return (EBUSY); } CURVNET_RESTORE(); /* Move the interface into the child jail/vnet. */ if_vmove(ifp, pr->pr_vnet); /* Report the new if_xname back to userland. */ sprintf(ifname, "%s", ifp->if_xname); prison_free(pr); return (0); } static int if_vmove_reclaim(struct thread *td, char *ifname, int jid) { struct prison *pr; struct vnet *vnet_dst; struct ifnet *ifp; int shutdown; /* Try to find the prison within our visibility. */ sx_slock(&allprison_lock); pr = prison_find_child(td->td_ucred->cr_prison, jid); sx_sunlock(&allprison_lock); if (pr == NULL) return (ENXIO); prison_hold_locked(pr); mtx_unlock(&pr->pr_mtx); /* Make sure the named iface exists in the source prison/vnet. */ CURVNET_SET(pr->pr_vnet); ifp = ifunit(ifname); /* XXX Lock to avoid races. */ if (ifp == NULL) { CURVNET_RESTORE(); prison_free(pr); return (ENXIO); } /* Do not try to move the iface from and to the same prison. */ vnet_dst = TD_TO_VNET(td); if (vnet_dst == ifp->if_vnet) { CURVNET_RESTORE(); prison_free(pr); return (EEXIST); } /* Make sure the VNET is stable.
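 * ("Stable" here means the source vnet is not inside its teardown window; the check below rejects the move while SI_SUB_VNET < vnet_state < SI_SUB_VNET_DONE.)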
*/ shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET && ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0; if (shutdown) { CURVNET_RESTORE(); prison_free(pr); return (EBUSY); } /* Get interface back from child jail/vnet. */ if_vmove(ifp, vnet_dst); CURVNET_RESTORE(); /* Report the new if_xname back to the userland. */ sprintf(ifname, "%s", ifp->if_xname); prison_free(pr); return (0); } #endif /* VIMAGE */ /* * Add a group to an interface */ int if_addgroup(struct ifnet *ifp, const char *groupname) { struct ifg_list *ifgl; struct ifg_group *ifg = NULL; struct ifg_member *ifgm; int new = 0; if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && groupname[strlen(groupname) - 1] <= '9') return (EINVAL); IFNET_WLOCK(); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) { IFNET_WUNLOCK(); return (EEXIST); } if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP, M_NOWAIT)) == NULL) { IFNET_WUNLOCK(); return (ENOMEM); } if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member), M_TEMP, M_NOWAIT)) == NULL) { free(ifgl, M_TEMP); IFNET_WUNLOCK(); return (ENOMEM); } TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) if (!strcmp(ifg->ifg_group, groupname)) break; if (ifg == NULL) { if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group), M_TEMP, M_NOWAIT)) == NULL) { free(ifgl, M_TEMP); free(ifgm, M_TEMP); IFNET_WUNLOCK(); return (ENOMEM); } strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); ifg->ifg_refcnt = 0; TAILQ_INIT(&ifg->ifg_members); TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next); new = 1; } ifg->ifg_refcnt++; ifgl->ifgl_group = ifg; ifgm->ifgm_ifp = ifp; IF_ADDR_WLOCK(ifp); TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); IF_ADDR_WUNLOCK(ifp); IFNET_WUNLOCK(); if (new) EVENTHANDLER_INVOKE(group_attach_event, ifg); EVENTHANDLER_INVOKE(group_change_event, groupname); return (0); } /* * Remove a group from an interface */ int if_delgroup(struct ifnet *ifp, const char *groupname) { struct ifg_list *ifgl; struct ifg_member *ifgm; IFNET_WLOCK(); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) break; if (ifgl == NULL) { IFNET_WUNLOCK(); return (ENOENT); } IF_ADDR_WLOCK(ifp); TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); IF_ADDR_WUNLOCK(ifp); TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) if (ifgm->ifgm_ifp == ifp) break; if (ifgm != NULL) { TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); free(ifgm, M_TEMP); } if (--ifgl->ifgl_group->ifg_refcnt == 0) { TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next); IFNET_WUNLOCK(); EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group); free(ifgl->ifgl_group, M_TEMP); } else IFNET_WUNLOCK(); free(ifgl, M_TEMP); EVENTHANDLER_INVOKE(group_change_event, groupname); return (0); } /* * Remove an interface from all groups */ static void if_delgroups(struct ifnet *ifp) { struct ifg_list *ifgl; struct ifg_member *ifgm; char groupname[IFNAMSIZ]; IFNET_WLOCK(); while (!TAILQ_EMPTY(&ifp->if_groups)) { ifgl = TAILQ_FIRST(&ifp->if_groups); strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ); IF_ADDR_WLOCK(ifp); TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); IF_ADDR_WUNLOCK(ifp); TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) if (ifgm->ifgm_ifp == ifp) break; if (ifgm != NULL) { TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); free(ifgm, M_TEMP); } if (--ifgl->ifgl_group->ifg_refcnt == 0) { 
TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next); IFNET_WUNLOCK(); EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group); free(ifgl->ifgl_group, M_TEMP); } else IFNET_WUNLOCK(); free(ifgl, M_TEMP); EVENTHANDLER_INVOKE(group_change_event, groupname); IFNET_WLOCK(); } IFNET_WUNLOCK(); } /* * Stores all groups from an interface in memory pointed * to by data */ static int if_getgroup(struct ifgroupreq *data, struct ifnet *ifp) { int len, error; struct ifg_list *ifgl; struct ifg_req ifgrq, *ifgp; struct ifgroupreq *ifgr = data; if (ifgr->ifgr_len == 0) { IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) ifgr->ifgr_len += sizeof(struct ifg_req); IF_ADDR_RUNLOCK(ifp); return (0); } len = ifgr->ifgr_len; ifgp = ifgr->ifgr_groups; /* XXX: wire */ IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { if (len < sizeof(ifgrq)) { IF_ADDR_RUNLOCK(ifp); return (EINVAL); } bzero(&ifgrq, sizeof ifgrq); strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, sizeof(ifgrq.ifgrq_group)); if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { IF_ADDR_RUNLOCK(ifp); return (error); } len -= sizeof(ifgrq); ifgp++; } IF_ADDR_RUNLOCK(ifp); return (0); } /* * Stores all members of a group in memory pointed to by data */ static int if_getgroupmembers(struct ifgroupreq *data) { struct ifgroupreq *ifgr = data; struct ifg_group *ifg; struct ifg_member *ifgm; struct ifg_req ifgrq, *ifgp; int len, error; IFNET_RLOCK(); TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) break; if (ifg == NULL) { IFNET_RUNLOCK(); return (ENOENT); } if (ifgr->ifgr_len == 0) { TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) ifgr->ifgr_len += sizeof(ifgrq); IFNET_RUNLOCK(); return (0); } len = ifgr->ifgr_len; ifgp = ifgr->ifgr_groups; TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { if (len < sizeof(ifgrq)) { IFNET_RUNLOCK(); return (EINVAL); } bzero(&ifgrq, sizeof ifgrq); strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, sizeof(ifgrq.ifgrq_member)); if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { IFNET_RUNLOCK(); return (error); } len -= sizeof(ifgrq); ifgp++; } IFNET_RUNLOCK(); return (0); } /* * Return counter values from counter(9)s stored in ifnet. */ uint64_t if_get_counter_default(struct ifnet *ifp, ift_counter cnt) { KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); return (counter_u64_fetch(ifp->if_counters[cnt])); } /* * Increase an ifnet counter. Usually used for counters shared * between the stack and a driver, but function supports them all. */ void if_inc_counter(struct ifnet *ifp, ift_counter cnt, int64_t inc) { KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt)); counter_u64_add(ifp->if_counters[cnt], inc); } /* * Copy data from ifnet to userland API structure if_data. 
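 * * (Illustrative note: each ifi_* statistic below is fetched through the ifp->if_get_counter() method, so a driver that overrides it at attach time, e.g. with a hypothetical assignment ifp->if_get_counter = mydrv_get_counter; is consulted here instead of the counter(9)-backed if_get_counter_default() above.)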
*/ void if_data_copy(struct ifnet *ifp, struct if_data *ifd) { ifd->ifi_type = ifp->if_type; ifd->ifi_physical = 0; ifd->ifi_addrlen = ifp->if_addrlen; ifd->ifi_hdrlen = ifp->if_hdrlen; ifd->ifi_link_state = ifp->if_link_state; ifd->ifi_vhid = 0; ifd->ifi_datalen = sizeof(struct if_data); ifd->ifi_mtu = ifp->if_mtu; ifd->ifi_metric = ifp->if_metric; ifd->ifi_baudrate = ifp->if_baudrate; ifd->ifi_hwassist = ifp->if_hwassist; ifd->ifi_epoch = ifp->if_epoch; ifd->ifi_lastchange = ifp->if_lastchange; ifd->ifi_ipackets = ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS); ifd->ifi_ierrors = ifp->if_get_counter(ifp, IFCOUNTER_IERRORS); ifd->ifi_opackets = ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS); ifd->ifi_oerrors = ifp->if_get_counter(ifp, IFCOUNTER_OERRORS); ifd->ifi_collisions = ifp->if_get_counter(ifp, IFCOUNTER_COLLISIONS); ifd->ifi_ibytes = ifp->if_get_counter(ifp, IFCOUNTER_IBYTES); ifd->ifi_obytes = ifp->if_get_counter(ifp, IFCOUNTER_OBYTES); ifd->ifi_imcasts = ifp->if_get_counter(ifp, IFCOUNTER_IMCASTS); ifd->ifi_omcasts = ifp->if_get_counter(ifp, IFCOUNTER_OMCASTS); ifd->ifi_iqdrops = ifp->if_get_counter(ifp, IFCOUNTER_IQDROPS); ifd->ifi_oqdrops = ifp->if_get_counter(ifp, IFCOUNTER_OQDROPS); ifd->ifi_noproto = ifp->if_get_counter(ifp, IFCOUNTER_NOPROTO); } /* * Wrapper functions for struct ifnet address list locking macros. These are * used by kernel modules to avoid encoding programming interface or binary * interface assumptions that may be violated when kernel-internal locking * approaches change. */ void if_addr_rlock(struct ifnet *ifp) { IF_ADDR_RLOCK(ifp); } void if_addr_runlock(struct ifnet *ifp) { IF_ADDR_RUNLOCK(ifp); } void if_maddr_rlock(if_t ifp) { IF_ADDR_RLOCK((struct ifnet *)ifp); } void if_maddr_runlock(if_t ifp) { IF_ADDR_RUNLOCK((struct ifnet *)ifp); } /* * Initialization, destruction and refcounting functions for ifaddrs. 
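 * * Lifecycle sketch (illustrative, not literal source): * * ifa = ifa_alloc(size, M_WAITOK); (refcount starts at 1) * ifa_ref(ifa); (an additional holder) * ifa_free(ifa); (that holder drops its reference) * ifa_free(ifa); (last reference: counters and ifa are freed)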
*/ struct ifaddr * ifa_alloc(size_t size, int flags) { struct ifaddr *ifa; KASSERT(size >= sizeof(struct ifaddr), ("%s: invalid size %zu", __func__, size)); ifa = malloc(size, M_IFADDR, M_ZERO | flags); if (ifa == NULL) return (NULL); if ((ifa->ifa_opackets = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_ipackets = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_obytes = counter_u64_alloc(flags)) == NULL) goto fail; if ((ifa->ifa_ibytes = counter_u64_alloc(flags)) == NULL) goto fail; refcount_init(&ifa->ifa_refcnt, 1); return (ifa); fail: /* free(NULL) is okay */ counter_u64_free(ifa->ifa_opackets); counter_u64_free(ifa->ifa_ipackets); counter_u64_free(ifa->ifa_obytes); counter_u64_free(ifa->ifa_ibytes); free(ifa, M_IFADDR); return (NULL); } void ifa_ref(struct ifaddr *ifa) { refcount_acquire(&ifa->ifa_refcnt); } void ifa_free(struct ifaddr *ifa) { if (refcount_release(&ifa->ifa_refcnt)) { counter_u64_free(ifa->ifa_opackets); counter_u64_free(ifa->ifa_ipackets); counter_u64_free(ifa->ifa_obytes); counter_u64_free(ifa->ifa_ibytes); free(ifa, M_IFADDR); } } static int ifa_maintain_loopback_route(int cmd, const char *otype, struct ifaddr *ifa, struct sockaddr *ia) { int error; struct rt_addrinfo info; struct sockaddr_dl null_sdl; struct ifnet *ifp; ifp = ifa->ifa_ifp; bzero(&info, sizeof(info)); if (cmd != RTM_DELETE) info.rti_ifp = V_loif; info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC; info.rti_info[RTAX_DST] = ia; info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl; link_init_sdl(ifp, (struct sockaddr *)&null_sdl, ifp->if_type); error = rtrequest1_fib(cmd, &info, NULL, ifp->if_fib); if (error != 0) log(LOG_DEBUG, "%s: %s failed for interface %s: %u\n", __func__, otype, if_name(ifp), error); return (error); } int ifa_add_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { return (ifa_maintain_loopback_route(RTM_ADD, "insertion", ifa, ia)); } int ifa_del_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { return (ifa_maintain_loopback_route(RTM_DELETE, "deletion", ifa, ia)); } int ifa_switch_loopback_route(struct ifaddr *ifa, struct sockaddr *ia) { return (ifa_maintain_loopback_route(RTM_CHANGE, "switch", ifa, ia)); } /* * XXX: Because sockaddr_dl has deeper structure than the sockaddr * structs used to represent other address families, it is necessary * to perform a different comparison. */ #define sa_dl_equal(a1, a2) \ ((((const struct sockaddr_dl *)(a1))->sdl_len == \ ((const struct sockaddr_dl *)(a2))->sdl_len) && \ (bcmp(CLLADDR((const struct sockaddr_dl *)(a1)), \ CLLADDR((const struct sockaddr_dl *)(a2)), \ ((const struct sockaddr_dl *)(a1))->sdl_alen) == 0)) /* * Locate an interface based on a complete address. 
*/ /*ARGSUSED*/ static struct ifaddr * ifa_ifwithaddr_internal(const struct sockaddr *addr, int getref) { struct ifnet *ifp; struct ifaddr *ifa; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if (sa_equal(addr, ifa->ifa_addr)) { if (getref) ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } /* IP6 doesn't have broadcast */ if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr && ifa->ifa_broadaddr->sa_len != 0 && sa_equal(ifa->ifa_broadaddr, addr)) { if (getref) ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } IF_ADDR_RUNLOCK(ifp); } ifa = NULL; done: IFNET_RUNLOCK_NOSLEEP(); return (ifa); } struct ifaddr * ifa_ifwithaddr(const struct sockaddr *addr) { return (ifa_ifwithaddr_internal(addr, 1)); } int ifa_ifwithaddr_check(const struct sockaddr *addr) { return (ifa_ifwithaddr_internal(addr, 0) != NULL); } /* * Locate an interface based on the broadcast address. */ /* ARGSUSED */ struct ifaddr * ifa_ifwithbroadaddr(const struct sockaddr *addr, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr && ifa->ifa_broadaddr->sa_len != 0 && sa_equal(ifa->ifa_broadaddr, addr)) { ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } IF_ADDR_RUNLOCK(ifp); } ifa = NULL; done: IFNET_RUNLOCK_NOSLEEP(); return (ifa); } /* * Locate the point to point interface with a given destination address. */ /*ARGSUSED*/ struct ifaddr * ifa_ifwithdstaddr(const struct sockaddr *addr, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((ifp->if_flags & IFF_POINTOPOINT) == 0) continue; if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != addr->sa_family) continue; if (ifa->ifa_dstaddr != NULL && sa_equal(addr, ifa->ifa_dstaddr)) { ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } IF_ADDR_RUNLOCK(ifp); } ifa = NULL; done: IFNET_RUNLOCK_NOSLEEP(); return (ifa); } /* * Find an interface on a specific network. If several match, the most * specific one found is chosen. */ struct ifaddr * ifa_ifwithnet(const struct sockaddr *addr, int ignore_ptp, int fibnum) { struct ifnet *ifp; struct ifaddr *ifa; struct ifaddr *ifa_maybe = NULL; u_int af = addr->sa_family; const char *addr_data = addr->sa_data, *cplim; /* * AF_LINK addresses can be looked up directly by their index number, * so do that if we can. */ if (af == AF_LINK) { const struct sockaddr_dl *sdl = (const struct sockaddr_dl *)addr; if (sdl->sdl_index && sdl->sdl_index <= V_if_index) return (ifaddr_byindex(sdl->sdl_index)); } /* * Scan through each interface, looking for ones that have addresses * in this address family and the requested fib. Maintain a reference * on ifa_maybe once we find one, as we release the IF_ADDR_RLOCK() that * kept it stable when we move on to the next interface. */
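/* * Worked example (hypothetical addresses): matching 192.0.2.77 against an ifa with address 192.0.2.1 and netmask 255.255.255.0: the first three bytes XOR to zero, and in the last byte (77 ^ 1) & 0x00 == 0, so the candidate lies on the right network and becomes (or stays) ifa_maybe unless a more specific prefix is found. */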
IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum)) continue; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { const char *cp, *cp2, *cp3; if (ifa->ifa_addr->sa_family != af) next: continue; if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT && !ignore_ptp) { /* * This is a bit broken as it doesn't * take into account that the remote end may * be a single node in the network we are * looking for. * The trouble is that we don't know the * netmask for the remote end. */ if (ifa->ifa_dstaddr != NULL && sa_equal(addr, ifa->ifa_dstaddr)) { ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); goto done; } } else { /* * Scan all the bits in the ifa's address. * If a bit disagrees with what we are * looking for, mask it with the netmask * to see if it really matters. * (A byte at a time) */ if (ifa->ifa_netmask == 0) continue; cp = addr_data; cp2 = ifa->ifa_addr->sa_data; cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; while (cp3 < cplim) if ((*cp++ ^ *cp2++) & *cp3++) goto next; /* next address! */ /* * If the netmask of what we just found * is more specific than what we had before * (if we had one), or if the virtual status * of the new prefix is better than that of the old one, * then remember the new one before continuing * to search for an even better one. */ if (ifa_maybe == NULL || ifa_preferred(ifa_maybe, ifa) || rn_refines((caddr_t)ifa->ifa_netmask, (caddr_t)ifa_maybe->ifa_netmask)) { if (ifa_maybe != NULL) ifa_free(ifa_maybe); ifa_maybe = ifa; ifa_ref(ifa_maybe); } } } IF_ADDR_RUNLOCK(ifp); } ifa = ifa_maybe; ifa_maybe = NULL; done: IFNET_RUNLOCK_NOSLEEP(); if (ifa_maybe != NULL) ifa_free(ifa_maybe); return (ifa); } /* * Find an interface address specific to an interface best matching * a given address. */ struct ifaddr * ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp) { struct ifaddr *ifa; const char *cp, *cp2, *cp3; char *cplim; struct ifaddr *ifa_maybe = NULL; u_int af = addr->sa_family; if (af >= AF_MAX) return (NULL); IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != af) continue; if (ifa_maybe == NULL) ifa_maybe = ifa; if (ifa->ifa_netmask == 0) { if (sa_equal(addr, ifa->ifa_addr) || (ifa->ifa_dstaddr && sa_equal(addr, ifa->ifa_dstaddr))) goto done; continue; } if (ifp->if_flags & IFF_POINTOPOINT) { if (sa_equal(addr, ifa->ifa_dstaddr)) goto done; } else { cp = addr->sa_data; cp2 = ifa->ifa_addr->sa_data; cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; for (; cp3 < cplim; cp3++) if ((*cp++ ^ *cp2++) & *cp3) break; if (cp3 == cplim) goto done; } } ifa = ifa_maybe; done: if (ifa != NULL) ifa_ref(ifa); IF_ADDR_RUNLOCK(ifp); return (ifa); } /* * See whether new ifa is better than current one: * 1) A non-virtual one is preferred over virtual. * 2) A virtual one in master state is preferred over any other state. * * Used in several address selecting functions. */ int ifa_preferred(struct ifaddr *cur, struct ifaddr *next) { return (cur->ifa_carp && (!next->ifa_carp || ((*carp_master_p)(next) && !(*carp_master_p)(cur)))); } #include /* * Default action when installing a route with a Link Level gateway. * Look up an appropriate real ifa to point to. * This should be moved to /sys/net/link.c eventually.
*/ static void link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info) { struct ifaddr *ifa, *oifa; struct sockaddr *dst; struct ifnet *ifp; if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == NULL) || ((ifp = ifa->ifa_ifp) == NULL) || ((dst = rt_key(rt)) == NULL)) return; ifa = ifaof_ifpforaddr(dst, ifp); if (ifa) { oifa = rt->rt_ifa; rt->rt_ifa = ifa; ifa_free(oifa); if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) ifa->ifa_rtrequest(cmd, rt, info); } } struct sockaddr_dl * link_alloc_sdl(size_t size, int flags) { return (malloc(size, M_TEMP, flags)); } void link_free_sdl(struct sockaddr *sa) { free(sa, M_TEMP); } /* * Fills in the given sdl with basic interface info. * Returns pointer to filled sdl. */ struct sockaddr_dl * link_init_sdl(struct ifnet *ifp, struct sockaddr *paddr, u_char iftype) { struct sockaddr_dl *sdl; sdl = (struct sockaddr_dl *)paddr; memset(sdl, 0, sizeof(struct sockaddr_dl)); sdl->sdl_len = sizeof(struct sockaddr_dl); sdl->sdl_family = AF_LINK; sdl->sdl_index = ifp->if_index; sdl->sdl_type = iftype; return (sdl); } /* * Mark an interface down and notify protocols of * the transition. */ static void if_unroute(struct ifnet *ifp, int flag, int fam) { struct ifaddr *ifa; KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP")); ifp->if_flags &= ~flag; getmicrotime(&ifp->if_lastchange); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) pfctlinput(PRC_IFDOWN, ifa->ifa_addr); ifp->if_qflush(ifp); if (ifp->if_carp) (*carp_linkstate_p)(ifp); rt_ifmsg(ifp); } /* * Mark an interface up and notify protocols of * the transition. */ static void if_route(struct ifnet *ifp, int flag, int fam) { struct ifaddr *ifa; KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP")); ifp->if_flags |= flag; getmicrotime(&ifp->if_lastchange); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) pfctlinput(PRC_IFUP, ifa->ifa_addr); if (ifp->if_carp) (*carp_linkstate_p)(ifp); rt_ifmsg(ifp); #ifdef INET6 in6_if_up(ifp); #endif } void (*vlan_link_state_p)(struct ifnet *); /* XXX: private from if_vlan */ void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */ struct ifnet *(*vlan_trunkdev_p)(struct ifnet *); struct ifnet *(*vlan_devat_p)(struct ifnet *, uint16_t); int (*vlan_tag_p)(struct ifnet *, uint16_t *); int (*vlan_setcookie_p)(struct ifnet *, void *); void *(*vlan_cookie_p)(struct ifnet *); /* * Handle a change in the interface link state. To avoid LORs * between the driver lock and upper layer locks, as well as possible * recursions, we post the event to a taskqueue, and all the work * is done in the static do_link_state_change(). */ void if_link_state_change(struct ifnet *ifp, int link_state) { /* Return if state hasn't changed. */ if (ifp->if_link_state == link_state) return; ifp->if_link_state = link_state; taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask); } static void do_link_state_change(void *arg, int pending) { struct ifnet *ifp = (struct ifnet *)arg; int link_state = ifp->if_link_state; CURVNET_SET(ifp->if_vnet); /* Notify that the link state has changed.
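 * * (Illustrative aside, not from the original source: other subsystems can observe these transitions through eventhandler(9); a hypothetical consumer with a callback my_link_cb(void *arg, struct ifnet *ifp, int link_state) would subscribe with EVENTHANDLER_REGISTER(ifnet_link_event, my_link_cb, NULL, EVENTHANDLER_PRI_ANY); and likewise for the ifnet_event up/down notifications this revision adds to if_down()/if_up() below.)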
*/ rt_ifmsg(ifp); if (ifp->if_vlantrunk != NULL) (*vlan_link_state_p)(ifp); if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) && ifp->if_l2com != NULL) (*ng_ether_link_state_p)(ifp, link_state); if (ifp->if_carp) (*carp_linkstate_p)(ifp); if (ifp->if_bridge) (*bridge_linkstate_p)(ifp); if (ifp->if_lagg) (*lagg_linkstate_p)(ifp, link_state); if (IS_DEFAULT_VNET(curvnet)) devctl_notify("IFNET", ifp->if_xname, (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); if (pending > 1) if_printf(ifp, "%d link states coalesced\n", pending); if (log_link_state_change) log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname, (link_state == LINK_STATE_UP) ? "UP" : "DOWN" ); EVENTHANDLER_INVOKE(ifnet_link_event, ifp, link_state); CURVNET_RESTORE(); } /* * Mark an interface down and notify protocols of * the transition. */ void if_down(struct ifnet *ifp) { + EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_DOWN); if_unroute(ifp, IFF_UP, AF_UNSPEC); } /* * Mark an interface up and notify protocols of * the transition. */ void if_up(struct ifnet *ifp) { if_route(ifp, IFF_UP, AF_UNSPEC); + EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_UP); } /* * Flush an interface queue. */ void if_qflush(struct ifnet *ifp) { struct mbuf *m, *n; struct ifaltq *ifq; ifq = &ifp->if_snd; IFQ_LOCK(ifq); #ifdef ALTQ if (ALTQ_IS_ENABLED(ifq)) ALTQ_PURGE(ifq); #endif n = ifq->ifq_head; while ((m = n) != NULL) { n = m->m_nextpkt; m_freem(m); } ifq->ifq_head = 0; ifq->ifq_tail = 0; ifq->ifq_len = 0; IFQ_UNLOCK(ifq); } /* * Map interface name to interface structure pointer, with or without * returning a reference. */ struct ifnet * ifunit_ref(const char *name) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 && !(ifp->if_flags & IFF_DYING)) break; } if (ifp != NULL) if_ref(ifp); IFNET_RUNLOCK_NOSLEEP(); return (ifp); } struct ifnet * ifunit(const char *name) { struct ifnet *ifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0) break; } IFNET_RUNLOCK_NOSLEEP(); return (ifp); } /* * Hardware specific interface ioctls. */ static int ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) { struct ifreq *ifr; int error = 0, do_ifup = 0; int new_flags, temp_flags; size_t namelen, onamelen; size_t descrlen; char *descrbuf, *odescrbuf; char new_name[IFNAMSIZ]; struct ifaddr *ifa; struct sockaddr_dl *sdl; ifr = (struct ifreq *)data; switch (cmd) { case SIOCGIFINDEX: ifr->ifr_index = ifp->if_index; break; case SIOCGIFFLAGS: temp_flags = ifp->if_flags | ifp->if_drv_flags; ifr->ifr_flags = temp_flags & 0xffff; ifr->ifr_flagshigh = temp_flags >> 16; break; case SIOCGIFCAP: ifr->ifr_reqcap = ifp->if_capabilities; ifr->ifr_curcap = ifp->if_capenable; break; #ifdef MAC case SIOCGIFMAC: error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp); break; #endif case SIOCGIFMETRIC: ifr->ifr_metric = ifp->if_metric; break; case SIOCGIFMTU: ifr->ifr_mtu = ifp->if_mtu; break; case SIOCGIFPHYS: /* XXXGL: did this ever work?
*/ ifr->ifr_phys = 0; break; case SIOCGIFDESCR: error = 0; sx_slock(&ifdescr_sx); if (ifp->if_description == NULL) error = ENOMSG; else { /* space for terminating nul */ descrlen = strlen(ifp->if_description) + 1; if (ifr->ifr_buffer.length < descrlen) ifr->ifr_buffer.buffer = NULL; else error = copyout(ifp->if_description, ifr->ifr_buffer.buffer, descrlen); ifr->ifr_buffer.length = descrlen; } sx_sunlock(&ifdescr_sx); break; case SIOCSIFDESCR: error = priv_check(td, PRIV_NET_SETIFDESCR); if (error) return (error); /* * Copy only (length-1) bytes to make sure that * if_description is always nul terminated. The * length parameter is supposed to count the * terminating nul in. */ if (ifr->ifr_buffer.length > ifdescr_maxlen) return (ENAMETOOLONG); else if (ifr->ifr_buffer.length == 0) descrbuf = NULL; else { descrbuf = malloc(ifr->ifr_buffer.length, M_IFDESCR, M_WAITOK | M_ZERO); error = copyin(ifr->ifr_buffer.buffer, descrbuf, ifr->ifr_buffer.length - 1); if (error) { free(descrbuf, M_IFDESCR); break; } } sx_xlock(&ifdescr_sx); odescrbuf = ifp->if_description; ifp->if_description = descrbuf; sx_xunlock(&ifdescr_sx); getmicrotime(&ifp->if_lastchange); free(odescrbuf, M_IFDESCR); break; case SIOCGIFFIB: ifr->ifr_fib = ifp->if_fib; break; case SIOCSIFFIB: error = priv_check(td, PRIV_NET_SETIFFIB); if (error) return (error); if (ifr->ifr_fib >= rt_numfibs) return (EINVAL); ifp->if_fib = ifr->ifr_fib; break; case SIOCSIFFLAGS: error = priv_check(td, PRIV_NET_SETIFFLAGS); if (error) return (error); /* * Currently, no driver owned flags pass the IFF_CANTCHANGE * check, so we don't need special handling here yet. */ new_flags = (ifr->ifr_flags & 0xffff) | (ifr->ifr_flagshigh << 16); if (ifp->if_flags & IFF_UP && (new_flags & IFF_UP) == 0) { if_down(ifp); } else if (new_flags & IFF_UP && (ifp->if_flags & IFF_UP) == 0) { do_ifup = 1; } /* See if permanently promiscuous mode bit is about to flip */ if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) { if (new_flags & IFF_PPROMISC) ifp->if_flags |= IFF_PROMISC; else if (ifp->if_pcount == 0) ifp->if_flags &= ~IFF_PROMISC; if (log_promisc_mode_change) log(LOG_INFO, "%s: permanently promiscuous mode %s\n", ifp->if_xname, ((new_flags & IFF_PPROMISC) ? "enabled" : "disabled")); } ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | (new_flags &~ IFF_CANTCHANGE); if (ifp->if_ioctl) { (void) (*ifp->if_ioctl)(ifp, cmd, data); } if (do_ifup) if_up(ifp); getmicrotime(&ifp->if_lastchange); break; case SIOCSIFCAP: error = priv_check(td, PRIV_NET_SETIFCAP); if (error) return (error); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); if (ifr->ifr_reqcap & ~ifp->if_capabilities) return (EINVAL); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) getmicrotime(&ifp->if_lastchange); break; #ifdef MAC case SIOCSIFMAC: error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp); break; #endif case SIOCSIFNAME: error = priv_check(td, PRIV_NET_SETIFNAME); if (error) return (error); error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); if (error != 0) return (error); if (new_name[0] == '\0') return (EINVAL); if (new_name[IFNAMSIZ-1] != '\0') { new_name[IFNAMSIZ-1] = '\0'; if (strlen(new_name) == IFNAMSIZ-1) return (EINVAL); } if (ifunit(new_name) != NULL) return (EEXIST); /* * XXX: Locking. Nothing else seems to lock if_flags, * and there are numerous other races with the * ifunit() checks not being atomic with namespace * changes (renames, vmoves, if_attach, etc). */ ifp->if_flags |= IFF_RENAMING; /* Announce the departure of the interface. 
*/ rt_ifannouncemsg(ifp, IFAN_DEPARTURE); EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); log(LOG_INFO, "%s: changing name to '%s'\n", ifp->if_xname, new_name); IF_ADDR_WLOCK(ifp); strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); ifa = ifp->if_addr; sdl = (struct sockaddr_dl *)ifa->ifa_addr; namelen = strlen(new_name); onamelen = sdl->sdl_nlen; /* * Move the address if needed. This is safe because we * allocate space for a name of length IFNAMSIZ when we * create this in if_attach(). */ if (namelen != onamelen) { bcopy(sdl->sdl_data + onamelen, sdl->sdl_data + namelen, sdl->sdl_alen); } bcopy(new_name, sdl->sdl_data, namelen); sdl->sdl_nlen = namelen; sdl = (struct sockaddr_dl *)ifa->ifa_netmask; bzero(sdl->sdl_data, onamelen); while (namelen != 0) sdl->sdl_data[--namelen] = 0xff; IF_ADDR_WUNLOCK(ifp); EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); /* Announce the return of the interface. */ rt_ifannouncemsg(ifp, IFAN_ARRIVAL); ifp->if_flags &= ~IFF_RENAMING; break; #ifdef VIMAGE case SIOCSIFVNET: error = priv_check(td, PRIV_NET_SETIFVNET); if (error) return (error); error = if_vmove_loan(td, ifp, ifr->ifr_name, ifr->ifr_jid); break; #endif case SIOCSIFMETRIC: error = priv_check(td, PRIV_NET_SETIFMETRIC); if (error) return (error); ifp->if_metric = ifr->ifr_metric; getmicrotime(&ifp->if_lastchange); break; case SIOCSIFPHYS: error = priv_check(td, PRIV_NET_SETIFPHYS); if (error) return (error); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCSIFMTU: { u_long oldmtu = ifp->if_mtu; error = priv_check(td, PRIV_NET_SETIFMTU); if (error) return (error); if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) return (EINVAL); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) { getmicrotime(&ifp->if_lastchange); rt_ifmsg(ifp); } /* * If the link MTU changed, do network layer specific procedure. */ if (ifp->if_mtu != oldmtu) { #ifdef INET6 nd6_setmtu(ifp); #endif rt_updatemtu(ifp); } break; } case SIOCADDMULTI: case SIOCDELMULTI: if (cmd == SIOCADDMULTI) error = priv_check(td, PRIV_NET_ADDMULTI); else error = priv_check(td, PRIV_NET_DELMULTI); if (error) return (error); /* Don't allow group membership on non-multicast interfaces. */ if ((ifp->if_flags & IFF_MULTICAST) == 0) return (EOPNOTSUPP); /* Don't let users screw up protocols' entries. */ if (ifr->ifr_addr.sa_family != AF_LINK) return (EINVAL); if (cmd == SIOCADDMULTI) { struct ifmultiaddr *ifma; /* * Userland is only permitted to join groups once * via the if_addmulti() KPI, because it cannot hold * struct ifmultiaddr * between calls. It may also * lose a race while we check if the membership * already exists. 
*/ IF_ADDR_RLOCK(ifp); ifma = if_findmulti(ifp, &ifr->ifr_addr); IF_ADDR_RUNLOCK(ifp); if (ifma != NULL) error = EADDRINUSE; else error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); } else { error = if_delmulti(ifp, &ifr->ifr_addr); } if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCSIFPHYADDR: case SIOCDIFPHYADDR: #ifdef INET6 case SIOCSIFPHYADDR_IN6: #endif case SIOCSIFMEDIA: case SIOCSIFGENERIC: error = priv_check(td, PRIV_NET_HWIOCTL); if (error) return (error); if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); if (error == 0) getmicrotime(&ifp->if_lastchange); break; case SIOCGIFSTATUS: case SIOCGIFPSRCADDR: case SIOCGIFPDSTADDR: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: case SIOCGIFGENERIC: if (ifp->if_ioctl == NULL) return (EOPNOTSUPP); error = (*ifp->if_ioctl)(ifp, cmd, data); break; case SIOCSIFLLADDR: error = priv_check(td, PRIV_NET_SETLLADDR); if (error) return (error); error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len); break; case SIOCAIFGROUP: { struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; error = priv_check(td, PRIV_NET_ADDIFGROUP); if (error) return (error); if ((error = if_addgroup(ifp, ifgr->ifgr_group))) return (error); break; } case SIOCGIFGROUP: if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp))) return (error); break; case SIOCDIFGROUP: { struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; error = priv_check(td, PRIV_NET_DELIFGROUP); if (error) return (error); if ((error = if_delgroup(ifp, ifgr->ifgr_group))) return (error); break; } default: error = ENOIOCTL; break; } return (error); } /* COMPAT_SVR4 */ #define OSIOCGIFCONF _IOWR('i', 20, struct ifconf) #ifdef COMPAT_FREEBSD32 struct ifconf32 { int32_t ifc_len; union { uint32_t ifcu_buf; uint32_t ifcu_req; } ifc_ifcu; }; #define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32) #endif /* * Interface ioctls. */ int ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td) { struct ifnet *ifp; struct ifreq *ifr; int error; int oif_flags; #ifdef VIMAGE int shutdown; #endif CURVNET_SET(so->so_vnet); #ifdef VIMAGE /* Make sure the VNET is stable. */ shutdown = (so->so_vnet->vnet_state > SI_SUB_VNET && so->so_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0; if (shutdown) { CURVNET_RESTORE(); return (EBUSY); } #endif switch (cmd) { case SIOCGIFCONF: case OSIOCGIFCONF: /* COMPAT_SVR4 */ error = ifconf(cmd, data); CURVNET_RESTORE(); return (error); #ifdef COMPAT_FREEBSD32 case SIOCGIFCONF32: { struct ifconf32 *ifc32; struct ifconf ifc; ifc32 = (struct ifconf32 *)data; ifc.ifc_len = ifc32->ifc_len; ifc.ifc_buf = PTRIN(ifc32->ifc_buf); error = ifconf(SIOCGIFCONF, (void *)&ifc); CURVNET_RESTORE(); if (error == 0) ifc32->ifc_len = ifc.ifc_len; return (error); } #endif } ifr = (struct ifreq *)data; switch (cmd) { #ifdef VIMAGE case SIOCSIFRVNET: error = priv_check(td, PRIV_NET_SETIFVNET); if (error == 0) error = if_vmove_reclaim(td, ifr->ifr_name, ifr->ifr_jid); CURVNET_RESTORE(); return (error); #endif case SIOCIFCREATE: case SIOCIFCREATE2: error = priv_check(td, PRIV_NET_IFCREATE); if (error == 0) error = if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), cmd == SIOCIFCREATE2 ? 
ifr->ifr_data : NULL); CURVNET_RESTORE(); return (error); case SIOCIFDESTROY: error = priv_check(td, PRIV_NET_IFDESTROY); if (error == 0) error = if_clone_destroy(ifr->ifr_name); CURVNET_RESTORE(); return (error); case SIOCIFGCLONERS: error = if_clone_list((struct if_clonereq *)data); CURVNET_RESTORE(); return (error); case SIOCGIFGMEMB: error = if_getgroupmembers((struct ifgroupreq *)data); CURVNET_RESTORE(); return (error); #if defined(INET) || defined(INET6) case SIOCSVH: case SIOCGVH: if (carp_ioctl_p == NULL) error = EPROTONOSUPPORT; else error = (*carp_ioctl_p)(ifr, cmd, td); CURVNET_RESTORE(); return (error); #endif } ifp = ifunit_ref(ifr->ifr_name); if (ifp == NULL) { CURVNET_RESTORE(); return (ENXIO); } error = ifhwioctl(cmd, ifp, data, td); if (error != ENOIOCTL) { if_rele(ifp); CURVNET_RESTORE(); return (error); } oif_flags = ifp->if_flags; if (so->so_proto == NULL) { if_rele(ifp); CURVNET_RESTORE(); return (EOPNOTSUPP); } /* * Pass the request on to the socket control method, and if the * latter returns EOPNOTSUPP, directly to the interface. * * Make an exception for the legacy SIOCSIF* requests. Drivers * trust SIOCSIFADDR et al to come from an already privileged * layer, and do not perform any credentials checks or input * validation. */ error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, ifp, td)); if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL && cmd != SIOCSIFADDR && cmd != SIOCSIFBRDADDR && cmd != SIOCSIFDSTADDR && cmd != SIOCSIFNETMASK) error = (*ifp->if_ioctl)(ifp, cmd, data); if ((oif_flags ^ ifp->if_flags) & IFF_UP) { #ifdef INET6 if (ifp->if_flags & IFF_UP) in6_if_up(ifp); #endif } if_rele(ifp); CURVNET_RESTORE(); return (error); } /* * The code common to handling reference counted flags, * e.g., in ifpromisc() and if_allmulti(). * The "pflag" argument can specify a permanent mode flag to check, * such as IFF_PPROMISC for promiscuous mode; should be 0 if none. * * Only to be used on stack-owned flags, not driver-owned flags. */ static int if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch) { struct ifreq ifr; int error; int oldflags, oldcount; /* Sanity checks to catch programming errors */ KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0, ("%s: setting driver-owned flag %d", __func__, flag)); if (onswitch) KASSERT(*refcount >= 0, ("%s: increment negative refcount %d for flag %d", __func__, *refcount, flag)); else KASSERT(*refcount > 0, ("%s: decrement non-positive refcount %d for flag %d", __func__, *refcount, flag)); /* In case this mode is permanent, just touch refcount */ if (ifp->if_flags & pflag) { *refcount += onswitch ? 1 : -1; return (0); } /* Save ifnet parameters for if_ioctl() may fail */ oldcount = *refcount; oldflags = ifp->if_flags; /* * See if we aren't the only and touching refcount is enough. * Actually toggle interface flag if we are the first or last. 
*/ if (onswitch) { if ((*refcount)++) return (0); ifp->if_flags |= flag; } else { if (--(*refcount)) return (0); ifp->if_flags &= ~flag; } /* Call down the driver since we've changed interface flags */ if (ifp->if_ioctl == NULL) { error = EOPNOTSUPP; goto recover; } ifr.ifr_flags = ifp->if_flags & 0xffff; ifr.ifr_flagshigh = ifp->if_flags >> 16; error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); if (error) goto recover; /* Notify userland that interface flags have changed */ rt_ifmsg(ifp); return (0); recover: /* Recover after driver error */ *refcount = oldcount; ifp->if_flags = oldflags; return (error); } /* * Set/clear promiscuous mode on interface ifp based on the truth value * of pswitch. The calls are reference counted so that only the first * "on" request actually has an effect, as does the final "off" request. * Results are undefined if the "off" and "on" requests are not matched. */ int ifpromisc(struct ifnet *ifp, int pswitch) { int error; int oldflags = ifp->if_flags; error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC, &ifp->if_pcount, pswitch); /* If promiscuous mode status has changed, log a message */ if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC) && log_promisc_mode_change) log(LOG_INFO, "%s: promiscuous mode %s\n", ifp->if_xname, (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled"); return (error); } /* * Return interface configuration * of system. List may be used * in later ioctl's (above) to get * other information. */ /*ARGSUSED*/ static int ifconf(u_long cmd, caddr_t data) { struct ifconf *ifc = (struct ifconf *)data; struct ifnet *ifp; struct ifaddr *ifa; struct ifreq ifr; struct sbuf *sb; int error, full = 0, valid_len, max_len; /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */ max_len = MAXPHYS - 1; /* Prevent hostile input from being able to crash the system */ if (ifc->ifc_len <= 0) return (EINVAL); again: if (ifc->ifc_len <= max_len) { max_len = ifc->ifc_len; full = 1; } sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN); max_len = 0; valid_len = 0; IFNET_RLOCK(); TAILQ_FOREACH(ifp, &V_ifnet, if_link) { int addrs; /* * Zero the ifr_name buffer to make sure we don't * disclose the contents of the stack. */ memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name)); if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) >= sizeof(ifr.ifr_name)) { sbuf_delete(sb); IFNET_RUNLOCK(); return (ENAMETOOLONG); } addrs = 0; IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { struct sockaddr *sa = ifa->ifa_addr; if (prison_if(curthread->td_ucred, sa) != 0) continue; addrs++; /* COMPAT_SVR4 */ if (cmd == OSIOCGIFCONF) { struct osockaddr *osa = (struct osockaddr *)&ifr.ifr_addr; ifr.ifr_addr = *sa; osa->sa_family = sa->sa_family; sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); } else if (sa->sa_len <= sizeof(*sa)) { ifr.ifr_addr = *sa; sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); } else { sbuf_bcat(sb, &ifr, offsetof(struct ifreq, ifr_addr)); max_len += offsetof(struct ifreq, ifr_addr); sbuf_bcat(sb, sa, sa->sa_len); max_len += sa->sa_len; } if (sbuf_error(sb) == 0) valid_len = sbuf_len(sb); } IF_ADDR_RUNLOCK(ifp); if (addrs == 0) { bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr)); sbuf_bcat(sb, &ifr, sizeof(ifr)); max_len += sizeof(ifr); if (sbuf_error(sb) == 0) valid_len = sbuf_len(sb); } } IFNET_RUNLOCK(); /* * If we didn't allocate enough space (uncommon), try again. If * we have already allocated as much space as we are allowed, * return what we've got. 
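The if_setflag() helper above, used by ifpromisc() here and by if_allmulti() below, gives stack-owned flags reference-count semantics: only the 0-to-1 and 1-to-0 transitions actually reprogram the driver, so independent consumers (bpf, bridge, carp) can nest enable/disable pairs freely. A standalone sketch of that counting discipline, with a printf standing in for the SIOCSIFFLAGS call down to the driver (illustrative only; the kernel version additionally short-circuits on a permanent-mode flag and rolls back state if the driver errors out):

#include <assert.h>
#include <stdio.h>

#define FLAG_PROMISC	0x1

struct mockif {
	int	flags;
	int	pcount;		/* consumers currently requesting promisc */
};

/* Mirror of the first-on/last-off logic in if_setflag(). */
static int
mock_setflag(struct mockif *ifp, int flag, int *refcount, int onswitch)
{
	if (onswitch) {
		if ((*refcount)++)	/* not the first: count only */
			return (0);
		ifp->flags |= flag;
	} else {
		if (--(*refcount))	/* not the last: count only */
			return (0);
		ifp->flags &= ~flag;
	}
	printf("driver reprogrammed, flags=%#x\n", ifp->flags);
	return (0);
}

int
main(void)
{
	struct mockif mif = { 0, 0 };

	mock_setflag(&mif, FLAG_PROMISC, &mif.pcount, 1);	/* A: programs hw */
	mock_setflag(&mif, FLAG_PROMISC, &mif.pcount, 1);	/* B: refcount only */
	mock_setflag(&mif, FLAG_PROMISC, &mif.pcount, 0);	/* A leaves: still on */
	assert(mif.flags & FLAG_PROMISC);
	mock_setflag(&mif, FLAG_PROMISC, &mif.pcount, 0);	/* B leaves: hw reset */
	assert((mif.flags & FLAG_PROMISC) == 0);
	return (0);
}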
*/ if (valid_len != max_len && !full) { sbuf_delete(sb); goto again; } ifc->ifc_len = valid_len; sbuf_finish(sb); error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len); sbuf_delete(sb); return (error); } /* * Just like ifpromisc(), but for all-multicast-reception mode. */ int if_allmulti(struct ifnet *ifp, int onswitch) { return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch)); } struct ifmultiaddr * if_findmulti(struct ifnet *ifp, const struct sockaddr *sa) { struct ifmultiaddr *ifma; IF_ADDR_LOCK_ASSERT(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (sa->sa_family == AF_LINK) { if (sa_dl_equal(ifma->ifma_addr, sa)) break; } else { if (sa_equal(ifma->ifma_addr, sa)) break; } } return ifma; } /* * Allocate a new ifmultiaddr and initialize based on passed arguments. We * make copies of passed sockaddrs. The ifmultiaddr will not be added to * the ifnet multicast address list here, so the caller must do that and * other setup work (such as notifying the device driver). The reference * count is initialized to 1. */ static struct ifmultiaddr * if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa, int mflags) { struct ifmultiaddr *ifma; struct sockaddr *dupsa; ifma = malloc(sizeof *ifma, M_IFMADDR, mflags | M_ZERO); if (ifma == NULL) return (NULL); dupsa = malloc(sa->sa_len, M_IFMADDR, mflags); if (dupsa == NULL) { free(ifma, M_IFMADDR); return (NULL); } bcopy(sa, dupsa, sa->sa_len); ifma->ifma_addr = dupsa; ifma->ifma_ifp = ifp; ifma->ifma_refcount = 1; ifma->ifma_protospec = NULL; if (llsa == NULL) { ifma->ifma_lladdr = NULL; return (ifma); } dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags); if (dupsa == NULL) { free(ifma->ifma_addr, M_IFMADDR); free(ifma, M_IFMADDR); return (NULL); } bcopy(llsa, dupsa, llsa->sa_len); ifma->ifma_lladdr = dupsa; return (ifma); } /* * if_freemulti: free ifmultiaddr structure and possibly attached related * addresses. The caller is responsible for implementing reference * counting, notifying the driver, handling routing messages, and releasing * any dependent link layer state. */ static void if_freemulti(struct ifmultiaddr *ifma) { KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d", ifma->ifma_refcount)); if (ifma->ifma_lladdr != NULL) free(ifma->ifma_lladdr, M_IFMADDR); free(ifma->ifma_addr, M_IFMADDR); free(ifma, M_IFMADDR); } /* * Register an additional multicast address with a network interface. * * - If the address is already present, bump the reference count on the * address and return. * - If the address is not link-layer, look up a link layer address. * - Allocate address structures for one or both addresses, and attach to the * multicast address list on the interface. If automatically adding a link * layer address, the protocol address will own a reference to the link * layer address, to be freed when it is freed. * - Notify the network device driver of an addition to the multicast address * list. * * 'sa' points to caller-owned memory with the desired multicast address. * * 'retifma' will be used to return a pointer to the resulting multicast * address reference, if desired. */ int if_addmulti(struct ifnet *ifp, struct sockaddr *sa, struct ifmultiaddr **retifma) { struct ifmultiaddr *ifma, *ll_ifma; struct sockaddr *llsa; struct sockaddr_dl sdl; int error; /* * If the address is already present, return a new reference to it; * otherwise, allocate storage and set up a new address. 
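From userland, the ifconf() handler above is consumed through SIOCGIFCONF: the caller supplies a buffer and gets back packed struct ifreq records whose trailing address part is variable length. A minimal sketch, assuming a FreeBSD environment; it sizes the buffer generously rather than implementing the full grow-and-retry dance:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <err.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	struct ifconf ifc;
	struct ifreq *ifr;
	char *buf, *cp;
	int len = 64 * 1024;	/* assume this is enough; real code retries */
	int s;

	if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	ifc.ifc_len = len;
	ifc.ifc_buf = buf;
	if (ioctl(s, SIOCGIFCONF, &ifc) == -1)
		err(1, "SIOCGIFCONF");
	/* Step by sa_len, matching how ifconf() packed the records. */
	for (cp = buf; cp < buf + ifc.ifc_len; ) {
		ifr = (struct ifreq *)cp;
		printf("%s af=%d\n", ifr->ifr_name, ifr->ifr_addr.sa_family);
		cp += offsetof(struct ifreq, ifr_addr) +
		    (ifr->ifr_addr.sa_len > sizeof(struct sockaddr) ?
		    ifr->ifr_addr.sa_len : sizeof(struct sockaddr));
	}
	free(buf);
	return (0);
}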
*/ IF_ADDR_WLOCK(ifp); ifma = if_findmulti(ifp, sa); if (ifma != NULL) { ifma->ifma_refcount++; if (retifma != NULL) *retifma = ifma; IF_ADDR_WUNLOCK(ifp); return (0); } /* * The address isn't already present; resolve the protocol address * into a link layer address, and then look that up, bump its * refcount or allocate an ifma for that also. * Most link layer resolving functions returns address data which * fits inside default sockaddr_dl structure. However callback * can allocate another sockaddr structure, in that case we need to * free it later. */ llsa = NULL; ll_ifma = NULL; if (ifp->if_resolvemulti != NULL) { /* Provide called function with buffer size information */ sdl.sdl_len = sizeof(sdl); llsa = (struct sockaddr *)&sdl; error = ifp->if_resolvemulti(ifp, &llsa, sa); if (error) goto unlock_out; } /* * Allocate the new address. Don't hook it up yet, as we may also * need to allocate a link layer multicast address. */ ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT); if (ifma == NULL) { error = ENOMEM; goto free_llsa_out; } /* * If a link layer address is found, we'll need to see if it's * already present in the address list, or allocate is as well. * When this block finishes, the link layer address will be on the * list. */ if (llsa != NULL) { ll_ifma = if_findmulti(ifp, llsa); if (ll_ifma == NULL) { ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT); if (ll_ifma == NULL) { --ifma->ifma_refcount; if_freemulti(ifma); error = ENOMEM; goto free_llsa_out; } TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma, ifma_link); } else ll_ifma->ifma_refcount++; ifma->ifma_llifma = ll_ifma; } /* * We now have a new multicast address, ifma, and possibly a new or * referenced link layer address. Add the primary address to the * ifnet address list. */ TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); if (retifma != NULL) *retifma = ifma; /* * Must generate the message while holding the lock so that 'ifma' * pointer is still valid. */ rt_newmaddrmsg(RTM_NEWMADDR, ifma); IF_ADDR_WUNLOCK(ifp); /* * We are certain we have added something, so call down to the * interface to let them know about it. */ if (ifp->if_ioctl != NULL) { (void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0); } if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) link_free_sdl(llsa); return (0); free_llsa_out: if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) link_free_sdl(llsa); unlock_out: IF_ADDR_WUNLOCK(ifp); return (error); } /* * Delete a multicast group membership by network-layer group address. * * Returns ENOENT if the entry could not be found. If ifp no longer * exists, results are undefined. This entry point should only be used * from subsystems which do appropriate locking to hold ifp for the * duration of the call. * Network-layer protocol domains must use if_delmulti_ifma(). */ int if_delmulti(struct ifnet *ifp, struct sockaddr *sa) { struct ifmultiaddr *ifma; int lastref; #ifdef INVARIANTS struct ifnet *oifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) ifp = NULL; IFNET_RUNLOCK_NOSLEEP(); KASSERT(ifp != NULL, ("%s: ifnet went away", __func__)); #endif if (ifp == NULL) return (ENOENT); IF_ADDR_WLOCK(ifp); lastref = 0; ifma = if_findmulti(ifp, sa); if (ifma != NULL) lastref = if_delmulti_locked(ifp, ifma, 0); IF_ADDR_WUNLOCK(ifp); if (ifma == NULL) return (ENOENT); if (lastref && ifp->if_ioctl != NULL) { (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); } return (0); } /* * Delete all multicast group membership for an interface. 
* Should be used to quickly flush all multicast filters. */ void if_delallmulti(struct ifnet *ifp) { struct ifmultiaddr *ifma; struct ifmultiaddr *next; IF_ADDR_WLOCK(ifp); TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) if_delmulti_locked(ifp, ifma, 0); IF_ADDR_WUNLOCK(ifp); } /* * Delete a multicast group membership by group membership pointer. * Network-layer protocol domains must use this routine. * * It is safe to call this routine if the ifp disappeared. */ void if_delmulti_ifma(struct ifmultiaddr *ifma) { struct ifnet *ifp; int lastref; ifp = ifma->ifma_ifp; #ifdef DIAGNOSTIC if (ifp == NULL) { printf("%s: ifma_ifp seems to be detached\n", __func__); } else { struct ifnet *oifp; IFNET_RLOCK_NOSLEEP(); TAILQ_FOREACH(oifp, &V_ifnet, if_link) if (ifp == oifp) break; if (ifp != oifp) { printf("%s: ifnet %p disappeared\n", __func__, ifp); ifp = NULL; } IFNET_RUNLOCK_NOSLEEP(); } #endif /* * If and only if the ifnet instance exists: Acquire the address lock. */ if (ifp != NULL) IF_ADDR_WLOCK(ifp); lastref = if_delmulti_locked(ifp, ifma, 0); if (ifp != NULL) { /* * If and only if the ifnet instance exists: * Release the address lock. * If the group was left: update the hardware hash filter. */ IF_ADDR_WUNLOCK(ifp); if (lastref && ifp->if_ioctl != NULL) { (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); } } } /* * Perform deletion of network-layer and/or link-layer multicast address. * * Return 0 if the reference count was decremented. * Return 1 if the final reference was released, indicating that the * hardware hash filter should be reprogrammed. */ static int if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching) { struct ifmultiaddr *ll_ifma; if (ifp != NULL && ifma->ifma_ifp != NULL) { KASSERT(ifma->ifma_ifp == ifp, ("%s: inconsistent ifp %p", __func__, ifp)); IF_ADDR_WLOCK_ASSERT(ifp); } ifp = ifma->ifma_ifp; /* * If the ifnet is detaching, null out references to ifnet, * so that upper protocol layers will notice, and not attempt * to obtain locks for an ifnet which no longer exists. The * routing socket announcement must happen before the ifnet * instance is detached from the system. */ if (detaching) { #ifdef DIAGNOSTIC printf("%s: detaching ifnet instance %p\n", __func__, ifp); #endif /* * ifp may already be nulled out if we are being reentered * to delete the ll_ifma. */ if (ifp != NULL) { rt_newmaddrmsg(RTM_DELMADDR, ifma); ifma->ifma_ifp = NULL; } } if (--ifma->ifma_refcount > 0) return 0; /* * If this ifma is a network-layer ifma, a link-layer ifma may * have been associated with it. Release it first if so. */ ll_ifma = ifma->ifma_llifma; if (ll_ifma != NULL) { KASSERT(ifma->ifma_lladdr != NULL, ("%s: llifma w/o lladdr", __func__)); if (detaching) ll_ifma->ifma_ifp = NULL; /* XXX */ if (--ll_ifma->ifma_refcount == 0) { if (ifp != NULL) { TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifma_link); } if_freemulti(ll_ifma); } } if (ifp != NULL) TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); if_freemulti(ifma); /* * The last reference to this instance of struct ifmultiaddr * was released; the hardware should be notified of this change. */ return 1; } /* * Set the link layer address on an interface. * * At this time we only support certain types of interfaces, * and we don't allow the length of the address to change. 
 *
 * Set noinline to be dtrace-friendly
 */
__noinline int
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;
	struct ifreq ifr;

	IF_ADDR_RLOCK(ifp);
	ifa = ifp->if_addr;
	if (ifa == NULL) {
		IF_ADDR_RUNLOCK(ifp);
		return (EINVAL);
	}
	ifa_ref(ifa);
	IF_ADDR_RUNLOCK(ifp);
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	if (sdl == NULL) {
		ifa_free(ifa);
		return (EINVAL);
	}
	if (len != sdl->sdl_alen) {	/* don't allow length to change */
		ifa_free(ifa);
		return (EINVAL);
	}
	switch (ifp->if_type) {
	case IFT_ETHER:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISO88025:
	case IFT_L2VLAN:
	case IFT_BRIDGE:
	case IFT_ARCNET:
	case IFT_IEEE8023ADLAG:
	case IFT_IEEE80211:
		bcopy(lladdr, LLADDR(sdl), len);
		ifa_free(ifa);
		break;
	default:
		ifa_free(ifa);
		return (ENODEV);
	}
	/*
	 * If the interface is already up, we need
	 * to re-init it in order to reprogram its
	 * address filter.
	 */
	if ((ifp->if_flags & IFF_UP) != 0) {
		if (ifp->if_ioctl) {
			ifp->if_flags &= ~IFF_UP;
			ifr.ifr_flags = ifp->if_flags & 0xffff;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
			ifp->if_flags |= IFF_UP;
			ifr.ifr_flags = ifp->if_flags & 0xffff;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
		}
	}
	EVENTHANDLER_INVOKE(iflladdr_event, ifp);
	return (0);
}

/*
 * Compat function for handling basic encapsulation requests.
 * Stacks that have not been converted (FDDI, IB, ...) still use the
 * traditional output model: ARP (and other similar L2 protocols) is
 * handled inside the output routine, and arpresolve()/nd6_resolve()
 * return a MAC address instead of a full prepend.
 *
 * This function computes the header (which is simply the MAC address)
 * for IPv4/IPv6 and returns EAFNOSUPPORT (which is then handled in the
 * ARP code) for other address families.
 */
static int
if_requestencap_default(struct ifnet *ifp, struct if_encap_req *req)
{
	if (req->rtype != IFENCAP_LL)
		return (EOPNOTSUPP);
	if (req->bufsize < req->lladdr_len)
		return (ENOMEM);
	switch (req->family) {
	case AF_INET:
	case AF_INET6:
		break;
	default:
		return (EAFNOSUPPORT);
	}
	/* Copy lladdr to storage as is */
	memmove(req->buf, req->lladdr, req->lladdr_len);
	req->bufsize = req->lladdr_len;
	req->lladdr_off = 0;
	return (0);
}

/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
void
if_initname(struct ifnet *ifp, const char *name, int unit)
{
	ifp->if_dname = name;
	ifp->if_dunit = unit;
	if (unit != IF_DUNIT_NONE)
		snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
	else
		strlcpy(ifp->if_xname, name, IFNAMSIZ);
}

int
if_printf(struct ifnet *ifp, const char * fmt, ...)
{ va_list ap; int retval; retval = printf("%s: ", ifp->if_xname); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } void if_start(struct ifnet *ifp) { (*(ifp)->if_start)(ifp); } /* * Backwards compatibility interface for drivers * that have not implemented it */ static int if_transmit(struct ifnet *ifp, struct mbuf *m) { int error; IFQ_HANDOFF(ifp, m, error); return (error); } static void if_input_default(struct ifnet *ifp __unused, struct mbuf *m) { m_freem(m); } int if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust) { int active = 0; IF_LOCK(ifq); if (_IF_QFULL(ifq)) { IF_UNLOCK(ifq); if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1); m_freem(m); return (0); } if (ifp != NULL) { if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len + adjust); if (m->m_flags & (M_BCAST|M_MCAST)) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); active = ifp->if_drv_flags & IFF_DRV_OACTIVE; } _IF_ENQUEUE(ifq, m); IF_UNLOCK(ifq); if (ifp != NULL && !active) (*(ifp)->if_start)(ifp); return (1); } void if_register_com_alloc(u_char type, if_com_alloc_t *a, if_com_free_t *f) { KASSERT(if_com_alloc[type] == NULL, ("if_register_com_alloc: %d already registered", type)); KASSERT(if_com_free[type] == NULL, ("if_register_com_alloc: %d free already registered", type)); if_com_alloc[type] = a; if_com_free[type] = f; } void if_deregister_com_alloc(u_char type) { KASSERT(if_com_alloc[type] != NULL, ("if_deregister_com_alloc: %d not registered", type)); KASSERT(if_com_free[type] != NULL, ("if_deregister_com_alloc: %d free not registered", type)); if_com_alloc[type] = NULL; if_com_free[type] = NULL; } /* API for driver access to network stack owned ifnet.*/ uint64_t if_setbaudrate(struct ifnet *ifp, uint64_t baudrate) { uint64_t oldbrate; oldbrate = ifp->if_baudrate; ifp->if_baudrate = baudrate; return (oldbrate); } uint64_t if_getbaudrate(if_t ifp) { return (((struct ifnet *)ifp)->if_baudrate); } int if_setcapabilities(if_t ifp, int capabilities) { ((struct ifnet *)ifp)->if_capabilities = capabilities; return (0); } int if_setcapabilitiesbit(if_t ifp, int setbit, int clearbit) { ((struct ifnet *)ifp)->if_capabilities |= setbit; ((struct ifnet *)ifp)->if_capabilities &= ~clearbit; return (0); } int if_getcapabilities(if_t ifp) { return ((struct ifnet *)ifp)->if_capabilities; } int if_setcapenable(if_t ifp, int capabilities) { ((struct ifnet *)ifp)->if_capenable = capabilities; return (0); } int if_setcapenablebit(if_t ifp, int setcap, int clearcap) { if(setcap) ((struct ifnet *)ifp)->if_capenable |= setcap; if(clearcap) ((struct ifnet *)ifp)->if_capenable &= ~clearcap; return (0); } const char * if_getdname(if_t ifp) { return ((struct ifnet *)ifp)->if_dname; } int if_togglecapenable(if_t ifp, int togglecap) { ((struct ifnet *)ifp)->if_capenable ^= togglecap; return (0); } int if_getcapenable(if_t ifp) { return ((struct ifnet *)ifp)->if_capenable; } /* * This is largely undesirable because it ties ifnet to a device, but does * provide flexiblity for an embedded product vendor. Should be used with * the understanding that it violates the interface boundaries, and should be * a last resort only. 
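The accessor layer above exists so drivers can treat if_t as an opaque handle; each function simply casts back to struct ifnet and touches one field. A toy mock of the same pattern, with hypothetical myif_* names (not the kernel KPI), showing why the paired set/clear-bit form is convenient:

#include <assert.h>

typedef void *myif_t;		/* opaque handle, as if_t is to drivers */

struct myifnet {		/* mock; stands in for struct ifnet */
	int	mif_capenable;
};

static int
myif_setcapenablebit(myif_t ifp, int setcap, int clearcap)
{
	((struct myifnet *)ifp)->mif_capenable |= setcap;
	((struct myifnet *)ifp)->mif_capenable &= ~clearcap;
	return (0);
}

static int
myif_getcapenable(myif_t ifp)
{
	return ((struct myifnet *)ifp)->mif_capenable;
}

int
main(void)
{
	struct myifnet mif = { 0 };

	myif_setcapenablebit(&mif, 0x2 | 0x8, 0);	/* enable two caps */
	myif_setcapenablebit(&mif, 0, 0x8);		/* revoke one */
	assert(myif_getcapenable(&mif) == 0x2);
	return (0);
}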
*/ int if_setdev(if_t ifp, void *dev) { return (0); } int if_setdrvflagbits(if_t ifp, int set_flags, int clear_flags) { ((struct ifnet *)ifp)->if_drv_flags |= set_flags; ((struct ifnet *)ifp)->if_drv_flags &= ~clear_flags; return (0); } int if_getdrvflags(if_t ifp) { return ((struct ifnet *)ifp)->if_drv_flags; } int if_setdrvflags(if_t ifp, int flags) { ((struct ifnet *)ifp)->if_drv_flags = flags; return (0); } int if_setflags(if_t ifp, int flags) { ((struct ifnet *)ifp)->if_flags = flags; return (0); } int if_setflagbits(if_t ifp, int set, int clear) { ((struct ifnet *)ifp)->if_flags |= set; ((struct ifnet *)ifp)->if_flags &= ~clear; return (0); } int if_getflags(if_t ifp) { return ((struct ifnet *)ifp)->if_flags; } int if_clearhwassist(if_t ifp) { ((struct ifnet *)ifp)->if_hwassist = 0; return (0); } int if_sethwassistbits(if_t ifp, int toset, int toclear) { ((struct ifnet *)ifp)->if_hwassist |= toset; ((struct ifnet *)ifp)->if_hwassist &= ~toclear; return (0); } int if_sethwassist(if_t ifp, int hwassist_bit) { ((struct ifnet *)ifp)->if_hwassist = hwassist_bit; return (0); } int if_gethwassist(if_t ifp) { return ((struct ifnet *)ifp)->if_hwassist; } int if_setmtu(if_t ifp, int mtu) { ((struct ifnet *)ifp)->if_mtu = mtu; return (0); } int if_getmtu(if_t ifp) { return ((struct ifnet *)ifp)->if_mtu; } int if_getmtu_family(if_t ifp, int family) { struct domain *dp; for (dp = domains; dp; dp = dp->dom_next) { if (dp->dom_family == family && dp->dom_ifmtu != NULL) return (dp->dom_ifmtu((struct ifnet *)ifp)); } return (((struct ifnet *)ifp)->if_mtu); } int if_setsoftc(if_t ifp, void *softc) { ((struct ifnet *)ifp)->if_softc = softc; return (0); } void * if_getsoftc(if_t ifp) { return ((struct ifnet *)ifp)->if_softc; } void if_setrcvif(struct mbuf *m, if_t ifp) { m->m_pkthdr.rcvif = (struct ifnet *)ifp; } void if_setvtag(struct mbuf *m, uint16_t tag) { m->m_pkthdr.ether_vtag = tag; } uint16_t if_getvtag(struct mbuf *m) { return (m->m_pkthdr.ether_vtag); } int if_sendq_empty(if_t ifp) { return IFQ_DRV_IS_EMPTY(&((struct ifnet *)ifp)->if_snd); } struct ifaddr * if_getifaddr(if_t ifp) { return ((struct ifnet *)ifp)->if_addr; } int if_getamcount(if_t ifp) { return ((struct ifnet *)ifp)->if_amcount; } int if_setsendqready(if_t ifp) { IFQ_SET_READY(&((struct ifnet *)ifp)->if_snd); return (0); } int if_setsendqlen(if_t ifp, int tx_desc_count) { IFQ_SET_MAXLEN(&((struct ifnet *)ifp)->if_snd, tx_desc_count); ((struct ifnet *)ifp)->if_snd.ifq_drv_maxlen = tx_desc_count; return (0); } int if_vlantrunkinuse(if_t ifp) { return ((struct ifnet *)ifp)->if_vlantrunk != NULL?1:0; } int if_input(if_t ifp, struct mbuf* sendmp) { (*((struct ifnet *)ifp)->if_input)((struct ifnet *)ifp, sendmp); return (0); } /* XXX */ #ifndef ETH_ADDR_LEN #define ETH_ADDR_LEN 6 #endif int if_setupmultiaddr(if_t ifp, void *mta, int *cnt, int max) { struct ifmultiaddr *ifma; uint8_t *lmta = (uint8_t *)mta; int mcnt = 0; TAILQ_FOREACH(ifma, &((struct ifnet *)ifp)->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == max) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &lmta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } *cnt = mcnt; return (0); } int if_multiaddr_array(if_t ifp, void *mta, int *cnt, int max) { int error; if_maddr_rlock(ifp); error = if_setupmultiaddr(ifp, mta, cnt, max); if_maddr_runlock(ifp); return (error); } int if_multiaddr_count(if_t ifp, int max) { struct ifmultiaddr *ifma; int count; count = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &((struct ifnet 
*)ifp)->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; count++; if (count == max) break; } if_maddr_runlock(ifp); return (count); } int if_multi_apply(struct ifnet *ifp, int (*filter)(void *, struct ifmultiaddr *, int), void *arg) { struct ifmultiaddr *ifma; int cnt = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) cnt += filter(arg, ifma, cnt); if_maddr_runlock(ifp); return (cnt); } struct mbuf * if_dequeue(if_t ifp) { struct mbuf *m; IFQ_DRV_DEQUEUE(&((struct ifnet *)ifp)->if_snd, m); return (m); } int if_sendq_prepend(if_t ifp, struct mbuf *m) { IFQ_DRV_PREPEND(&((struct ifnet *)ifp)->if_snd, m); return (0); } int if_setifheaderlen(if_t ifp, int len) { ((struct ifnet *)ifp)->if_hdrlen = len; return (0); } caddr_t if_getlladdr(if_t ifp) { return (IF_LLADDR((struct ifnet *)ifp)); } void * if_gethandle(u_char type) { return (if_alloc(type)); } void if_bpfmtap(if_t ifh, struct mbuf *m) { struct ifnet *ifp = (struct ifnet *)ifh; BPF_MTAP(ifp, m); } void if_etherbpfmtap(if_t ifh, struct mbuf *m) { struct ifnet *ifp = (struct ifnet *)ifh; ETHER_BPF_MTAP(ifp, m); } void if_vlancap(if_t ifh) { struct ifnet *ifp = (struct ifnet *)ifh; VLAN_CAPABILITIES(ifp); } void if_setinitfn(if_t ifp, void (*init_fn)(void *)) { ((struct ifnet *)ifp)->if_init = init_fn; } void if_setioctlfn(if_t ifp, int (*ioctl_fn)(if_t, u_long, caddr_t)) { ((struct ifnet *)ifp)->if_ioctl = (void *)ioctl_fn; } void if_setstartfn(if_t ifp, void (*start_fn)(if_t)) { ((struct ifnet *)ifp)->if_start = (void *)start_fn; } void if_settransmitfn(if_t ifp, if_transmit_fn_t start_fn) { ((struct ifnet *)ifp)->if_transmit = start_fn; } void if_setqflushfn(if_t ifp, if_qflush_fn_t flush_fn) { ((struct ifnet *)ifp)->if_qflush = flush_fn; } void if_setgetcounterfn(if_t ifp, if_get_counter_t fn) { ifp->if_get_counter = fn; } /* Revisit these - These are inline functions originally. */ int drbr_inuse_drv(if_t ifh, struct buf_ring *br) { return drbr_inuse(ifh, br); } struct mbuf* drbr_dequeue_drv(if_t ifh, struct buf_ring *br) { return drbr_dequeue(ifh, br); } int drbr_needs_enqueue_drv(if_t ifh, struct buf_ring *br) { return drbr_needs_enqueue(ifh, br); } int drbr_enqueue_drv(if_t ifh, struct buf_ring *br, struct mbuf *m) { return drbr_enqueue(ifh, br, m); } Index: projects/clang400-import/sys/net/iflib.c =================================================================== --- projects/clang400-import/sys/net/iflib.c (revision 312719) +++ projects/clang400-import/sys/net/iflib.c (revision 312720) @@ -1,5147 +1,5172 @@ /*- * Copyright (c) 2014-2016, Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #if defined(__i386__) || defined(__amd64__) #include #include #include #include #include #include #endif /* * enable accounting of every mbuf as it comes in to and goes out of iflib's software descriptor references */ #define MEMORY_LOGGING 0 /* * Enable mbuf vectors for compressing long mbuf chains */ /* * NB: * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead * we prefetch needs to be determined by the time spent in m_free vis a vis * the cost of a prefetch. This will of course vary based on the workload: * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which * is quite expensive, thus suggesting very little prefetch. * - small packet forwarding which is just returning a single mbuf to * UMA will typically be very fast vis a vis the cost of a memory * access. 
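The NB block above weighs prefetch distance against the cost of m_free(). A toy userland illustration of the idea, using the GCC/Clang __builtin_prefetch builtin to fetch the successor one step ahead while tearing down a chain; whether this wins depends entirely on allocator and cache behavior, which is exactly the comment's point:

#include <stdlib.h>

struct node {
	struct node	*next;
	char		pad[120];	/* roughly a couple of cache lines */
};

/* Free a chain while prefetching the successor, as a tx cleaner might. */
static void
free_chain(struct node *n)
{
	struct node *next;

	while (n != NULL) {
		next = n->next;
		__builtin_prefetch(next);	/* distance 1; the "tunable" */
		free(n);
		n = next;
	}
}

int
main(void)
{
	struct node *head = NULL, *n;
	int i;

	for (i = 0; i < 1000000; i++) {
		if ((n = malloc(sizeof(*n))) == NULL)
			abort();
		n->next = head;
		head = n;
	}
	free_chain(head);
	return (0);
}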
*/ /* * File organization: * - private structures * - iflib private utility functions * - ifnet functions * - vlan registry and other exported functions * - iflib public core functions * * */ static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library"); struct iflib_txq; typedef struct iflib_txq *iflib_txq_t; struct iflib_rxq; typedef struct iflib_rxq *iflib_rxq_t; struct iflib_fl; typedef struct iflib_fl *iflib_fl_t; struct iflib_ctx; typedef struct iflib_filter_info { driver_filter_t *ifi_filter; void *ifi_filter_arg; struct grouptask *ifi_task; struct iflib_ctx *ifi_ctx; } *iflib_filter_info_t; struct iflib_ctx { KOBJ_FIELDS; /* * Pointer to hardware driver's softc */ void *ifc_softc; device_t ifc_dev; if_t ifc_ifp; cpuset_t ifc_cpus; if_shared_ctx_t ifc_sctx; struct if_softc_ctx ifc_softc_ctx; struct mtx ifc_mtx; uint16_t ifc_nhwtxqs; uint16_t ifc_nhwrxqs; iflib_txq_t ifc_txqs; iflib_rxq_t ifc_rxqs; uint32_t ifc_if_flags; uint32_t ifc_flags; uint32_t ifc_max_fl_buf_size; int ifc_in_detach; int ifc_link_state; int ifc_link_irq; int ifc_pause_frames; int ifc_watchdog_events; struct cdev *ifc_led_dev; struct resource *ifc_msix_mem; struct if_irq ifc_legacy_irq; struct grouptask ifc_admin_task; struct grouptask ifc_vflr_task; struct iflib_filter_info ifc_filter_info; struct ifmedia ifc_media; struct sysctl_oid *ifc_sysctl_node; uint16_t ifc_sysctl_ntxqs; uint16_t ifc_sysctl_nrxqs; uint16_t ifc_sysctl_qs_eq_override; uint16_t ifc_sysctl_ntxds[8]; uint16_t ifc_sysctl_nrxds[8]; struct if_txrx ifc_txrx; #define isc_txd_encap ifc_txrx.ift_txd_encap #define isc_txd_flush ifc_txrx.ift_txd_flush #define isc_txd_credits_update ifc_txrx.ift_txd_credits_update #define isc_rxd_available ifc_txrx.ift_rxd_available #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_flush ifc_txrx.ift_rxd_flush #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_legacy_intr ifc_txrx.ift_legacy_intr eventhandler_tag ifc_vlan_attach_event; eventhandler_tag ifc_vlan_detach_event; uint8_t ifc_mac[ETHER_ADDR_LEN]; char ifc_mtx_name[16]; }; void * iflib_get_softc(if_ctx_t ctx) { return (ctx->ifc_softc); } device_t iflib_get_dev(if_ctx_t ctx) { return (ctx->ifc_dev); } if_t iflib_get_ifp(if_ctx_t ctx) { return (ctx->ifc_ifp); } struct ifmedia * iflib_get_media(if_ctx_t ctx) { return (&ctx->ifc_media); } void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]) { bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN); } if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx) { return (&ctx->ifc_softc_ctx); } if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx) { return (ctx->ifc_sctx); } #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*)) #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP) #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF) #define RX_SW_DESC_MAP_CREATED (1 << 0) #define TX_SW_DESC_MAP_CREATED (1 << 1) #define RX_SW_DESC_INUSE (1 << 3) #define TX_SW_DESC_MAPPED (1 << 4) typedef struct iflib_sw_rx_desc { bus_dmamap_t ifsd_map; /* bus_dma map for packet */ struct mbuf *ifsd_m; /* rx: uninitialized mbuf */ caddr_t ifsd_cl; /* direct cluster pointer for rx */ uint16_t ifsd_flags; } *iflib_rxsd_t; typedef struct iflib_sw_tx_desc_val { bus_dmamap_t ifsd_map; /* bus_dma map for packet */ struct mbuf *ifsd_m; /* pkthdr mbuf */ uint8_t ifsd_flags; } *iflib_txsd_val_t; typedef struct iflib_sw_tx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ 
uint8_t *ifsd_flags; } iflib_txsd_array_t; /* magic number that should be high enough for any hardware */ #define IFLIB_MAX_TX_SEGS 128 #define IFLIB_MAX_RX_SEGS 32 #define IFLIB_RX_COPY_THRESH 128 #define IFLIB_MAX_RX_REFRESH 32 #define IFLIB_QUEUE_IDLE 0 #define IFLIB_QUEUE_HUNG 1 #define IFLIB_QUEUE_WORKING 2 /* this should really scale with ring size - 32 is a fairly arbitrary value for this */ #define TX_BATCH_SIZE 16 #define IFLIB_RESTART_BUDGET 8 #define IFC_LEGACY 0x01 #define IFC_QFLUSH 0x02 #define IFC_MULTISEG 0x04 #define IFC_DMAR 0x08 #define IFC_SC_ALLOCATED 0x10 #define IFC_INIT_DONE 0x20 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) struct iflib_txq { uint16_t ift_in_use; uint16_t ift_cidx; uint16_t ift_cidx_processed; uint16_t ift_pidx; uint8_t ift_gen; uint8_t ift_db_pending; uint8_t ift_db_pending_queued; uint8_t ift_npending; uint8_t ift_br_offset; /* implicit pad */ uint64_t ift_processed; uint64_t ift_cleaned; #if MEMORY_LOGGING uint64_t ift_enqueued; uint64_t ift_dequeued; #endif uint64_t ift_no_tx_dma_setup; uint64_t ift_no_desc_avail; uint64_t ift_mbuf_defrag_failed; uint64_t ift_mbuf_defrag; uint64_t ift_map_failed; uint64_t ift_txd_encap_efbig; uint64_t ift_pullups; struct mtx ift_mtx; struct mtx ift_db_mtx; /* constant values */ if_ctx_t ift_ctx; struct ifmp_ring **ift_br; struct grouptask ift_task; uint16_t ift_size; uint16_t ift_id; struct callout ift_timer; struct callout ift_db_check; iflib_txsd_array_t ift_sds; uint8_t ift_nbr; uint8_t ift_qstatus; uint8_t ift_active; uint8_t ift_closed; int ift_watchdog_time; struct iflib_filter_info ift_filter_info; bus_dma_tag_t ift_desc_tag; bus_dma_tag_t ift_tso_desc_tag; iflib_dma_info_t ift_ifdi; #define MTX_NAME_LEN 16 char ift_mtx_name[MTX_NAME_LEN]; char ift_db_mtx_name[MTX_NAME_LEN]; bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ift_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); struct iflib_fl { uint16_t ifl_cidx; uint16_t ifl_pidx; uint16_t ifl_credits; uint8_t ifl_gen; #if MEMORY_LOGGING uint64_t ifl_m_enqueued; uint64_t ifl_m_dequeued; uint64_t ifl_cl_enqueued; uint64_t ifl_cl_dequeued; #endif /* implicit pad */ /* constant */ uint16_t ifl_size; uint16_t ifl_buf_size; uint16_t ifl_cltype; uma_zone_t ifl_zone; iflib_rxsd_t ifl_sds; iflib_rxq_t ifl_rxq; uint8_t ifl_id; bus_dma_tag_t ifl_desc_tag; iflib_dma_info_t ifl_ifdi; uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE); caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH]; } __aligned(CACHE_LINE_SIZE); static inline int get_inuse(int size, int cidx, int pidx, int gen) { int used; if (pidx > cidx) used = pidx - cidx; else if (pidx < cidx) used = size - cidx + pidx; else if (gen == 0 && pidx == cidx) used = 0; else if (gen == 1 && pidx == cidx) used = size; else panic("bad state"); return (used); } #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen)) #define IDXDIFF(head, tail, wrap) \ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) struct iflib_rxq { /* If there is a separate completion queue - * these are the cq cidx and pidx. Otherwise * these are unused. 
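get_inuse() above resolves the ambiguous pidx == cidx case with a generation bit: equal indices mean an empty ring until the producer wraps (gen 0) and a full ring afterwards (gen 1), and IDXDIFF() handles the same wrap for plain index distances. A standalone check of that arithmetic:

#include <assert.h>
#include <stdio.h>

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

static int
get_inuse(int size, int cidx, int pidx, int gen)
{
	if (pidx > cidx)
		return (pidx - cidx);
	if (pidx < cidx)
		return (size - cidx + pidx);
	return (gen == 0 ? 0 : size);	/* equal indices: empty vs. full */
}

int
main(void)
{
	assert(get_inuse(512, 0, 0, 0) == 0);		/* untouched: empty */
	assert(get_inuse(512, 0, 0, 1) == 512);		/* lapped: full */
	assert(get_inuse(512, 10, 74, 0) == 64);	/* simple span */
	assert(get_inuse(512, 500, 20, 0) == 32);	/* wrapped producer */
	assert(IDXDIFF(20, 500, 512) == 32);		/* same span via IDXDIFF */
	printf("ring occupancy arithmetic checks out\n");
	return (0);
}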
*/ uint16_t ifr_size; uint16_t ifr_cq_cidx; uint16_t ifr_cq_pidx; uint8_t ifr_cq_gen; uint8_t ifr_fl_offset; if_ctx_t ifr_ctx; iflib_fl_t ifr_fl; uint64_t ifr_rx_irq; uint16_t ifr_id; uint8_t ifr_lro_enabled; uint8_t ifr_nfl; struct lro_ctrl ifr_lc; struct grouptask ifr_task; struct iflib_filter_info ifr_filter_info; iflib_dma_info_t ifr_ifdi; /* dynamically allocate if any drivers need a value substantially larger than this */ struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ifr_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); /* * Only allow a single packet to take up most 1/nth of the tx ring */ #define MAX_SINGLE_PACKET_FRACTION 12 #define IF_BAD_DMA (bus_addr_t)-1 static int enable_msix = 1; #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING)) #define CTX_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_mtx, _name, "iflib ctx lock", MTX_DEF) #define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_mtx) #define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_mtx) #define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx) #define TXDB_LOCK_INIT(txq) mtx_init(&(txq)->ift_db_mtx, (txq)->ift_db_mtx_name, NULL, MTX_DEF) #define TXDB_TRYLOCK(txq) mtx_trylock(&(txq)->ift_db_mtx) #define TXDB_LOCK(txq) mtx_lock(&(txq)->ift_db_mtx) #define TXDB_UNLOCK(txq) mtx_unlock(&(txq)->ift_db_mtx) #define TXDB_LOCK_DESTROY(txq) mtx_destroy(&(txq)->ift_db_mtx) #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx) #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx) /* Our boot-time initialization hook */ static int iflib_module_event_handler(module_t, int, void *); static moduledata_t iflib_moduledata = { "iflib", iflib_module_event_handler, NULL }; DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY); MODULE_VERSION(iflib, 1); MODULE_DEPEND(iflib, pci, 1, 1, 1); MODULE_DEPEND(iflib, ether, 1, 1, 1); TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1); TASKQGROUP_DEFINE(if_config_tqg, 1, 1); #ifndef IFLIB_DEBUG_COUNTERS #ifdef INVARIANTS #define IFLIB_DEBUG_COUNTERS 1 #else #define IFLIB_DEBUG_COUNTERS 0 #endif /* !INVARIANTS */ #endif static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0, "iflib driver parameters"); /* * XXX need to ensure that this can't accidentally cause the head to be moved backwards */ static int iflib_min_tx_latency = 0; SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW, &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput"); #if IFLIB_DEBUG_COUNTERS static int iflib_tx_seen; static int iflib_tx_sent; static int iflib_tx_encap; static int iflib_rx_allocs; static int iflib_fl_refills; static int iflib_fl_refills_large; static int iflib_tx_frees; SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0, "# tx mbufs seen"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0, "# tx mbufs sent"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0, "# tx mbufs encapped"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0, "# tx frees"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0, "# rx allocations"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0, "# refills"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD, &iflib_fl_refills_large, 0, "# large refills"); static int iflib_txq_drain_flushing; static int iflib_txq_drain_oactive; static int iflib_txq_drain_notready; static int iflib_txq_drain_encapfail; 
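The debug counters above are compiled in only when IFLIB_DEBUG_COUNTERS is nonzero (defaulting on under INVARIANTS, per the #ifdef block earlier) and are exported under net.iflib by the surrounding SYSCTL_INT() declarations. A userland sketch that samples one via sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int seen;
	size_t len = sizeof(seen);

	/* Present only on kernels built with IFLIB_DEBUG_COUNTERS. */
	if (sysctlbyname("net.iflib.tx_seen", &seen, &len, NULL, 0) == -1)
		err(1, "net.iflib.tx_seen (debug counters not compiled in?)");
	printf("tx mbufs seen: %d\n", seen);
	return (0);
}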
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD, &iflib_txq_drain_flushing, 0, "# drain flushes"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD, &iflib_txq_drain_oactive, 0, "# drain oactives"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD, &iflib_txq_drain_notready, 0, "# drain notready"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD, &iflib_txq_drain_encapfail, 0, "# drain encap fails"); static int iflib_encap_load_mbuf_fail; static int iflib_encap_txq_avail_fail; static int iflib_encap_txd_encap_fail; SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD, &iflib_encap_load_mbuf_fail, 0, "# busdma load failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD, &iflib_encap_txq_avail_fail, 0, "# txq avail failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD, &iflib_encap_txd_encap_fail, 0, "# driver encap failures"); static int iflib_task_fn_rxs; static int iflib_rx_intr_enables; static int iflib_fast_intrs; static int iflib_intr_link; static int iflib_intr_msix; static int iflib_rx_unavail; static int iflib_rx_ctx_inactive; static int iflib_rx_zero_len; static int iflib_rx_if_input; static int iflib_rx_mbuf_null; static int iflib_rxd_flush; static int iflib_verbose_debug; SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD, &iflib_intr_link, 0, "# intr link calls"); SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD, &iflib_intr_msix, 0, "# intr msix calls"); SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0, "# task_fn_rx calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD, &iflib_rx_intr_enables, 0, "# rx intr enables"); SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0, "# fast_intr calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0, "# times rxeof called with no available data"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD, &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD, &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input, 0, "# times rxeof called if_input"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD, &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0, "# times rxd_flush called"); SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW, &iflib_verbose_debug, 0, "enable verbose debugging"); #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1) static void iflib_debug_reset(void) { iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs = iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees = iflib_txq_drain_flushing = iflib_txq_drain_oactive = iflib_txq_drain_notready = iflib_txq_drain_encapfail = iflib_encap_load_mbuf_fail = iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail = iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs = iflib_intr_link = iflib_intr_msix = iflib_rx_unavail = iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input = iflib_rx_mbuf_null = iflib_rxd_flush = 0; } #else #define DBG_COUNTER_INC(name) static void iflib_debug_reset(void) {} #endif #define IFLIB_DEBUG 0 static void iflib_tx_structures_free(if_ctx_t ctx); static void iflib_rx_structures_free(if_ctx_t ctx); static int 
iflib_queues_alloc(if_ctx_t ctx); static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget); static int iflib_qset_structures_setup(if_ctx_t ctx); static int iflib_msix_init(if_ctx_t ctx); static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str); static void iflib_txq_check_drain(iflib_txq_t txq, int budget); static uint32_t iflib_txq_can_drain(struct ifmp_ring *); static int iflib_register(if_ctx_t); static void iflib_init_locked(if_ctx_t ctx); static void iflib_add_device_sysctl_pre(if_ctx_t ctx); static void iflib_add_device_sysctl_post(if_ctx_t ctx); static void iflib_ifmp_purge(iflib_txq_t txq); static void _iflib_pre_assert(if_softc_ctx_t scctx); #ifdef DEV_NETMAP #include #include #include MODULE_DEPEND(iflib, netmap, 1, 1, 1); /* * device-specific sysctl variables: * * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. * During regular operations the CRC is stripped, but on some * hardware reception of frames not multiple of 64 is slower, * so using crcstrip=0 helps in benchmarks. * * iflib_rx_miss, iflib_rx_miss_bufs: * count packets that might be missed due to lost interrupts. */ SYSCTL_DECL(_dev_netmap); /* * The xl driver by default strips CRCs and we do not override it. */ int iflib_crcstrip = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip, CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames"); int iflib_rx_miss, iflib_rx_miss_bufs; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss, CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr"); SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs, CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs"); /* * Register/unregister. We are already under netmap lock. * Only called on the first register or the last unregister. */ static int iflib_netmap_register(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (!CTX_IS_VF(ctx)) IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* enable or disable flags and callbacks in na and ifp */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } IFDI_INIT(ctx); IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? CTX_UNLOCK(ctx); return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1); } /* * Reconcile kernel and user view of the transmit ring. * * All information is in the kring. * Userspace wants to send packets up to the one before kring->rhead, * kernel knows kring->nr_hwcur is the first unsent packet. * * Here we push packets out (as many as possible), and possibly * reclaim buffers from previously completed transmission. * * The caller (netmap) guarantees that there is only one instance * running at any time. Any interference with other driver * methods should be handled by the individual drivers. 
*/ static int iflib_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap ring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; struct if_pkt_info pi; /* * interrupts on every tx packet are expensive so request * them every half ring, or where NS_REPORT is set */ u_int report_frequency = kring->nkr_num_slots >> 1; /* device-specific */ if_ctx_t ctx = ifp->if_softc; iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id]; pi.ipi_segs = txq->ift_segs; pi.ipi_qsidx = kring->ring_id; pi.ipi_ndescs = 0; bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part: process new packets to send. * nm_i is the current index in the netmap ring, * nic_i is the corresponding index in the NIC ring. * * If we have packets to send (nm_i != head) * iterate over the netmap ring, fetch length and update * the corresponding slot in the NIC ring. Some drivers also * need to update the buffer's physical address in the NIC slot * even NS_BUF_CHANGED is not set (PNMB computes the addresses). * * The netmap_reload_map() calls is especially expensive, * even when (as in this case) the tag is 0, so do only * when the buffer has actually changed. * * If possible do not set the report/intr bit on all slots, * but only a few times per ring or when NS_REPORT is set. * * Finally, on 10G and faster drivers, it might be useful * to prefetch the next slot and txr entry. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ nic_i = netmap_idx_k2n(kring, nm_i); __builtin_prefetch(&ring->slot[nm_i]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]); __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]); for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int flags = (slot->flags & NS_REPORT || nic_i == 0 || nic_i == report_frequency) ? IPI_TX_INTR : 0; /* device-specific */ pi.ipi_pidx = nic_i; pi.ipi_flags = flags; /* Fill the slot in the NIC ring. */ ctx->isc_txd_encap(ctx->ifc_softc, &pi); /* prefetch for next round */ __builtin_prefetch(&ring->slot[nm_i + 1]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]); __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]); NM_CHECK_ADDR_LEN(na, addr, len); if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr); } slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); /* make sure changes to the buffer are synced */ bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i], BUS_DMASYNC_PREWRITE); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } kring->nr_hwcur = head; /* synchronize the NIC ring */ bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* (re)start the tx unit up to slot nic_i (excluded) */ ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i); } /* * Second part: reclaim buffers for completed transmissions. */ if (iflib_tx_credits_update(ctx, txq)) { /* some tx completed, increment avail */ nic_i = txq->ift_cidx_processed; kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); } return (0); } /* * Reconcile kernel and user view of the receive ring. 
* Same as for the txsync, this routine must be efficient. * The caller guarantees a single invocation, but races against * the rest of the driver should be handled here. * * On call, kring->rhead is the first packet that userspace wants * to keep, and kring->rcur is the wakeup point. * The kernel has previously reported packets up to kring->rtail. * * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective * of whether or not we received an interrupt. */ static int iflib_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap ring */ u_int nic_i; /* index into the NIC ring */ u_int i, n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; struct if_rxd_info ri; /* device-specific */ if_ctx_t ctx = ifp->if_softc; iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; iflib_fl_t fl = rxq->ifr_fl; if (head > lim) return netmap_ring_reinit(kring); bzero(&ri, sizeof(ri)); ri.iri_qsidx = kring->ring_id; ri.iri_ifp = ctx->ifc_ifp; /* XXX check sync modes */ for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) bus_dmamap_sync(fl->ifl_desc_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part: import newly received packets. * * nm_i is the index of the next free slot in the netmap ring, * nic_i is the index of the next received packet in the NIC ring, * and they may differ in case if_init() has been called while * in netmap mode. For the receive ring we have * * nic_i = rxr->next_check; * nm_i = kring->nr_hwtail (previous) * and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size * * rxr->next_check is set to 0 on a ring reinit */ if (netmap_no_pendintr || force_update) { int crclen = iflib_crcstrip ? 0 : 4; int error, avail; uint16_t slot_flags = kring->nkr_slot_flags; for (fl = rxq->ifr_fl, i = 0; i < rxq->ifr_nfl; i++, fl++) { nic_i = fl->ifl_cidx; nm_i = netmap_idx_n2k(kring, nic_i); avail = ctx->isc_rxd_available(ctx->ifc_softc, kring->ring_id, nic_i, INT_MAX); for (n = 0; avail > 0; n++, avail--) { error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); if (error) ring->slot[nm_i].len = 0; else ring->slot[nm_i].len = ri.iri_len - crclen; ring->slot[nm_i].flags = slot_flags; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, BUS_DMASYNC_POSTREAD); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } if (n) { /* update the state variables */ if (netmap_no_pendintr && !force_update) { /* diagnostics */ iflib_rx_miss++; iflib_rx_miss_bufs += n; } fl->ifl_cidx = nic_i; kring->nr_hwtail = nm_i; } kring->nr_kflags &= ~NKR_PENDINTR; } } /* * Second part: skip past packets that userspace has released * (kring->nr_hwcur to head excluded), * and make the buffers available for reception.
* As usual nm_i is the index in the netmap ring, * nic_i is the index in the NIC ring, and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size */ /* XXX not sure how this will work with multiple free lists */ nm_i = kring->nr_hwcur; if (nm_i != head) { nic_i = netmap_idx_k2n(kring, nm_i); for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; uint64_t paddr; caddr_t vaddr; void *addr = PNMB(na, slot, &paddr); if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ goto ring_reset; vaddr = addr; if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, addr); slot->flags &= ~NS_BUF_CHANGED; } /* * XXX we should be batching this operation - TODO */ ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1, fl->ifl_buf_size); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, BUS_DMASYNC_PREREAD); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } kring->nr_hwcur = head; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * IMPORTANT: we must leave one free slot in the ring, * so move nic_i back by one unit */ nic_i = nm_prev(nic_i, lim); ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i); } return 0; ring_reset: return netmap_ring_reinit(kring); } static int iflib_netmap_attach(if_ctx_t ctx) { struct netmap_adapter na; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; bzero(&na, sizeof(na)); na.ifp = ctx->ifc_ifp; na.na_flags = NAF_BDG_MAYSLEEP; MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); na.num_tx_desc = scctx->isc_ntxd[0]; na.num_rx_desc = scctx->isc_nrxd[0]; na.nm_txsync = iflib_netmap_txsync; na.nm_rxsync = iflib_netmap_rxsync; na.nm_register = iflib_netmap_register; na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; return (netmap_attach(&na)); } static void iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_slot *slot; slot = netmap_reset(na, NR_TX, txq->ift_id, 0); if (slot == 0) return; for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. 
* netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i); netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); } } static void iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_slot *slot; iflib_rxsd_t sd; int nrxd; slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); if (slot == 0) return; sd = rxq->ifr_fl[0].ifl_sds; nrxd = ctx->ifc_softc_ctx.isc_nrxd[0]; for (int i = 0; i < nrxd; i++, sd++) { int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i); uint64_t paddr; void *addr; caddr_t vaddr; vaddr = addr = PNMB(na, slot + sj, &paddr); netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, sd->ifsd_map, addr); /* Update descriptor and the cached value */ ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1, rxq->ifr_fl[0].ifl_buf_size); } /* preserve queue */ if (ctx->ifc_ifp->if_capenable & IFCAP_NETMAP) { struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id]; int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, t); } else ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, nrxd-1); } #define iflib_netmap_detach(ifp) netmap_detach(ifp) #else #define iflib_netmap_txq_init(ctx, txq) #define iflib_netmap_rxq_init(ctx, rxq) #define iflib_netmap_detach(ifp) #define iflib_netmap_attach(ctx) (0) #define netmap_rx_irq(ifp, qid, budget) (0) #endif #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) #endif static void _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { if (err) return; *(bus_addr_t *) arg = segs[0].ds_addr; } int iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) { int err; if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ sctx->isc_q_align, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->idi_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed: %d\n", __func__, err); goto fail_0; } err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map); if (err) { device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, err); goto fail_1; } dma->idi_paddr = IF_BAD_DMA; err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr, size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT); if (err || dma->idi_paddr == IF_BAD_DMA) { device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err); goto fail_2; } dma->idi_size = size; return (0); fail_2: bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); fail_1: bus_dma_tag_destroy(dma->idi_tag); fail_0: dma->idi_tag = NULL; return (err); } int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count) { int i, err; iflib_dma_info_t *dmaiter; dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) { if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, 
mapflags)) != 0) break; } if (err) iflib_dma_free_multi(dmalist, i); return (err); } void iflib_dma_free(iflib_dma_info_t dma) { if (dma->idi_tag == NULL) return; if (dma->idi_paddr != IF_BAD_DMA) { bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->idi_tag, dma->idi_map); dma->idi_paddr = IF_BAD_DMA; } if (dma->idi_vaddr != NULL) { bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); dma->idi_vaddr = NULL; } bus_dma_tag_destroy(dma->idi_tag); dma->idi_tag = NULL; } void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count) { int i; iflib_dma_info_t *dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) iflib_dma_free(*dmaiter); } +#ifdef EARLY_AP_STARTUP +static const int iflib_started = 1; +#else +/* + * We used to abuse the smp_started flag to decide if the queues have been + * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()). + * That gave bad races, since the SYSINIT() runs strictly after smp_started + * is set. Run a SYSINIT() strictly after that to just set a usable + * completion flag. + */ + +static int iflib_started; + +static void +iflib_record_started(void *arg) +{ + iflib_started = 1; +} + +SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST, + iflib_record_started, NULL); +#endif + static int iflib_fast_intr(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; - if (!smp_started && mp_ncpus > 1) + if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, driver_intr_t handler, void *arg, char *name) { int rc; struct resource *res; void *tag; device_t dev = ctx->ifc_dev; MPASS(rid < 512); irq->ii_rid = rid; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) { device_printf(dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } irq->ii_res = res; KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET, filter, handler, arg, &tag); if (rc != 0) { device_printf(dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name ? name : "unknown", rc); return (rc); } else if (name) bus_describe_intr(dev, res, tag, "%s", name); irq->ii_tag = tag; return (0); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. * **********************************************************************/ static int iflib_txsd_alloc(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int err, nsegments, ntsosegments; nsegments = scctx->isc_tx_nsegments; ntsosegments = scctx->isc_tx_tso_segments_max; MPASS(scctx->isc_ntxd[0] > 0); MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); MPASS(nsegments > 0); MPASS(ntsosegments > 0); /* * Setup DMA descriptor areas. 
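 *
 * Two parent-derived tags are created below: one bounded by
 * isc_tx_maxsize/isc_tx_nsegments for ordinary frames, and a second
 * bounded by isc_tx_tso_size_max/isc_tx_tso_segments_max for TSO,
 * since a TSO burst can map to far more scatter/gather segments than
 * a single MTU-sized frame.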
*/ if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_tx_maxsize, /* maxsize */ nsegments, /* nsegments */ sctx->isc_tx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_desc_tag))) { device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); device_printf(dev,"maxsize: %zd nsegments: %d maxsegsize: %zd\n", sctx->isc_tx_maxsize, nsegments, sctx->isc_tx_maxsegsize); goto fail; } if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ scctx->isc_tx_tso_size_max, /* maxsize */ ntsosegments, /* nsegments */ scctx->isc_tx_tso_segsize_max, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_tso_desc_tag))) { device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err); goto fail; } if (!(txq->ift_sds.ifsd_flags = (uint8_t *) malloc(sizeof(uint8_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } if (!(txq->ift_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ #if defined(ACPI_DMAR) || (!(defined(__i386__) && !defined(__amd64__))) if ((ctx->ifc_flags & IFC_DMAR) == 0) return (0); if (!(txq->ift_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } #endif return (0); fail: /* We free all, it handles case where we are in the middle */ iflib_tx_structures_free(ctx); return (err); } static void iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i) { bus_dmamap_t map; map = NULL; if (txq->ift_sds.ifsd_map != NULL) map = txq->ift_sds.ifsd_map[i]; if (map != NULL) { bus_dmamap_unload(txq->ift_desc_tag, map); bus_dmamap_destroy(txq->ift_desc_tag, map); txq->ift_sds.ifsd_map[i] = NULL; } } static void iflib_txq_destroy(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; for (int i = 0; i < txq->ift_size; i++) iflib_txsd_destroy(ctx, txq, i); if (txq->ift_sds.ifsd_map != NULL) { free(txq->ift_sds.ifsd_map, M_IFLIB); txq->ift_sds.ifsd_map = NULL; } if (txq->ift_sds.ifsd_m != NULL) { free(txq->ift_sds.ifsd_m, M_IFLIB); txq->ift_sds.ifsd_m = NULL; } if (txq->ift_sds.ifsd_flags != NULL) { free(txq->ift_sds.ifsd_flags, M_IFLIB); txq->ift_sds.ifsd_flags = NULL; } if (txq->ift_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_desc_tag); txq->ift_desc_tag = NULL; } if (txq->ift_tso_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_tso_desc_tag); txq->ift_tso_desc_tag = NULL; } } static void iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) { struct mbuf **mp; mp = &txq->ift_sds.ifsd_m[i]; if (*mp == NULL) return; if (txq->ift_sds.ifsd_map != NULL) { bus_dmamap_sync(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); 
bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i]); } m_free(*mp); DBG_COUNTER_INC(tx_frees); *mp = NULL; } static int iflib_txq_setup(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_dma_info_t di; int i; /* Set number of descriptors available */ txq->ift_qstatus = IFLIB_QUEUE_IDLE; /* Reset indices */ txq->ift_cidx_processed = txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) bzero((void *)di->idi_vaddr, di->idi_size); IFDI_TXQ_SETUP(ctx, txq->ift_id); for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffers * that we'll need is equal to the number of receive descriptors * that we've allocated. * **********************************************************************/ static int iflib_rxsd_alloc(iflib_rxq_t rxq) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; iflib_fl_t fl; iflib_rxsd_t rxsd; int err; MPASS(scctx->isc_nrxd[0] > 0); MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); fl = rxq->ifr_fl; for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { fl->ifl_sds = malloc(sizeof(struct iflib_sw_rx_desc) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_WAITOK | M_ZERO); if (fl->ifl_sds == NULL) { device_printf(dev, "Unable to allocate rx sw desc memory\n"); return (ENOMEM); } fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_rx_maxsize, /* maxsize */ sctx->isc_rx_nsegments, /* nsegments */ sctx->isc_rx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &fl->ifl_desc_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, err); goto fail; } rxsd = fl->ifl_sds; for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++, rxsd++) { err = bus_dmamap_create(fl->ifl_desc_tag, 0, &rxsd->ifsd_map); if (err) { device_printf(dev, "%s: bus_dmamap_create failed: %d\n", __func__, err); goto fail; } } } return (0); fail: iflib_rx_structures_free(ctx); return (err); } /* * Internal service routines */ struct rxq_refill_cb_arg { int error; bus_dma_segment_t seg; int nseg; }; static void _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rxq_refill_cb_arg *cb_arg = arg; cb_arg->error = error; cb_arg->seg = segs[0]; cb_arg->nseg = nseg; } #ifdef ACPI_DMAR #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR) #else #define IS_DMAR(ctx) (0) #endif /** * _iflib_fl_refill - refill an rxq free-buffer list * @ctx: the iflib context * @fl: the free list to refill * @count: the number of new buffers to allocate * * (Re)populate an rxq free-buffer list with up to @count new packet buffers. * The caller must assure that @count does not exceed the free list's capacity.
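 *
 * In outline (a sketch that elides allocation-failure handling and the
 * DMA map loads), buffers are staged and pushed to the driver in
 * batches of at most IFLIB_MAX_RX_REFRESH, with a final flush that
 * publishes the new producer index to the hardware:
 *
 *	while (count--) {
 *		allocate cluster + mbuf; stage bus/virtual addresses;
 *		if (batch full || count == 0)
 *			ctx->isc_rxd_refill(..., staged addresses, ...);
 *	}
 *	ctx->isc_rxd_flush(..., last valid pidx);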
*/ static void _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) { struct mbuf *m; int pidx = fl->ifl_pidx; iflib_rxsd_t rxsd = &fl->ifl_sds[pidx]; caddr_t cl; int n, i = 0; uint64_t bus_addr; int err; n = count; MPASS(n > 0); MPASS(fl->ifl_credits + n <= fl->ifl_size); if (pidx < fl->ifl_cidx) MPASS(pidx + n <= fl->ifl_cidx); if (pidx == fl->ifl_cidx && (fl->ifl_credits < fl->ifl_size)) MPASS(fl->ifl_gen == 0); if (pidx > fl->ifl_cidx) MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); DBG_COUNTER_INC(fl_refills); if (n > 8) DBG_COUNTER_INC(fl_refills_large); while (n--) { /* * We allocate an uninitialized mbuf + cluster, mbuf is * initialized after rx. * * If the cluster is still set then we know a minimum sized packet was received */ if ((cl = rxsd->ifsd_cl) == NULL) { if ((cl = rxsd->ifsd_cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) break; #if MEMORY_LOGGING fl->ifl_cl_enqueued++; #endif } if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { break; } #if MEMORY_LOGGING fl->ifl_m_enqueued++; #endif DBG_COUNTER_INC(rx_allocs); #ifdef notyet if ((rxsd->ifsd_flags & RX_SW_DESC_MAP_CREATED) == 0) { int err; if ((err = bus_dmamap_create(fl->ifl_ifdi->idi_tag, 0, &rxsd->ifsd_map))) { log(LOG_WARNING, "bus_dmamap_create failed %d\n", err); uma_zfree(fl->ifl_zone, cl); n = 0; goto done; } rxsd->ifsd_flags |= RX_SW_DESC_MAP_CREATED; } #endif #if defined(__i386__) || defined(__amd64__) if (!IS_DMAR(ctx)) { bus_addr = pmap_kextract((vm_offset_t)cl); } else #endif { struct rxq_refill_cb_arg cb_arg; iflib_rxq_t q; cb_arg.error = 0; q = fl->ifl_rxq; err = bus_dmamap_load(fl->ifl_desc_tag, rxsd->ifsd_map, cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); if (err != 0 || cb_arg.error) { /* * !zone_pack ? */ if (fl->ifl_zone == zone_pack) uma_zfree(fl->ifl_zone, cl); m_free(m); n = 0; goto done; } bus_addr = cb_arg.seg.ds_addr; } rxsd->ifsd_flags |= RX_SW_DESC_INUSE; MPASS(rxsd->ifsd_m == NULL); rxsd->ifsd_cl = cl; rxsd->ifsd_m = m; fl->ifl_bus_addrs[i] = bus_addr; fl->ifl_vm_addrs[i] = cl; rxsd++; fl->ifl_credits++; i++; MPASS(fl->ifl_credits <= fl->ifl_size); if (++fl->ifl_pidx == fl->ifl_size) { fl->ifl_pidx = 0; fl->ifl_gen = 1; rxsd = fl->ifl_sds; } if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { ctx->isc_rxd_refill(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx, fl->ifl_bus_addrs, fl->ifl_vm_addrs, i, fl->ifl_buf_size); i = 0; pidx = fl->ifl_pidx; } } done: DBG_COUNTER_INC(rxd_flush); if (fl->ifl_pidx == 0) pidx = fl->ifl_size - 1; else pidx = fl->ifl_pidx - 1; ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); } static __inline void __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) { /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; #ifdef INVARIANTS int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; #endif MPASS(fl->ifl_credits <= fl->ifl_size); MPASS(reclaimable == delta); if (reclaimable > 0) _iflib_fl_refill(ctx, fl, min(max, reclaimable)); } static void iflib_fl_bufs_free(iflib_fl_t fl) { iflib_dma_info_t idi = fl->ifl_ifdi; uint32_t i; for (i = 0; i < fl->ifl_size; i++) { iflib_rxsd_t d = &fl->ifl_sds[i]; if (d->ifsd_flags & RX_SW_DESC_INUSE) { bus_dmamap_unload(fl->ifl_desc_tag, d->ifsd_map); bus_dmamap_destroy(fl->ifl_desc_tag, d->ifsd_map); if (d->ifsd_m != NULL) { m_init(d->ifsd_m, M_NOWAIT, MT_DATA, 0); uma_zfree(zone_mbuf, d->ifsd_m); } if (d->ifsd_cl != NULL) uma_zfree(fl->ifl_zone, d->ifsd_cl); 
d->ifsd_flags = 0; } else { MPASS(d->ifsd_cl == NULL); MPASS(d->ifsd_m == NULL); } #if MEMORY_LOGGING fl->ifl_m_dequeued++; fl->ifl_cl_dequeued++; #endif d->ifsd_cl = NULL; d->ifsd_m = NULL; } /* * Reset free list values */ fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = 0; bzero(idi->idi_vaddr, idi->idi_size); } /********************************************************************* * * Initialize a receive ring and its buffers. * **********************************************************************/ static int iflib_fl_setup(iflib_fl_t fl) { iflib_rxq_t rxq = fl->ifl_rxq; if_ctx_t ctx = rxq->ifr_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; /* ** Free current RX buffer structs and their mbufs */ iflib_fl_bufs_free(fl); /* Now replenish the mbufs */ MPASS(fl->ifl_credits == 0); /* * XXX don't set the max_frame_size to larger * than the hardware can handle */ if (sctx->isc_max_frame_size <= 2048) fl->ifl_buf_size = MCLBYTES; else if (sctx->isc_max_frame_size <= 4096) fl->ifl_buf_size = MJUMPAGESIZE; else if (sctx->isc_max_frame_size <= 9216) fl->ifl_buf_size = MJUM9BYTES; else fl->ifl_buf_size = MJUM16BYTES; if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; fl->ifl_cltype = m_gettype(fl->ifl_buf_size); fl->ifl_zone = m_getzone(fl->ifl_buf_size); /* avoid pre-allocating zillions of clusters to an idle card, * potentially speeding up attach */ _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); MPASS(min(128, fl->ifl_size) == fl->ifl_credits); if (min(128, fl->ifl_size) != fl->ifl_credits) return (ENOBUFS); /* * handle failure */ MPASS(rxq != NULL); MPASS(fl->ifl_ifdi != NULL); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void iflib_rx_sds_free(iflib_rxq_t rxq) { iflib_fl_t fl; int i; if (rxq->ifr_fl != NULL) { for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; if (fl->ifl_desc_tag != NULL) { bus_dma_tag_destroy(fl->ifl_desc_tag); fl->ifl_desc_tag = NULL; } } if (rxq->ifr_fl->ifl_sds != NULL) free(rxq->ifr_fl->ifl_sds, M_IFLIB); free(rxq->ifr_fl, M_IFLIB); rxq->ifr_fl = NULL; rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } } /* * MI (machine independent) logic */ static void iflib_timer(void *arg) { iflib_txq_t txq = arg; if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; /* ** Check on the state of the TX queue(s); this ** can be done without the lock because it's RO ** and the HUNG state will be static if set.
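 **
 ** The resulting flow: IFDI_TIMER() lets the driver refresh its state,
 ** a queue marked IFLIB_QUEUE_HUNG (with no pause frames observed)
 ** takes the watchdog-reset path below, and a merely stalled or nearly
 ** full ring just has its drain task re-enqueued.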
*/ IFDI_TIMER(ctx, txq->ift_id); if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && (ctx->ifc_pause_frames == 0)) goto hung; if (TXQ_AVAIL(txq) <= 2*scctx->isc_tx_nsegments || ifmp_ring_is_stalled(txq->ift_br[0])) GROUPTASK_ENQUEUE(&txq->ift_task); ctx->ifc_pause_frames = 0; if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); return; hung: CTX_LOCK(ctx); if_setdrvflagbits(ctx->ifc_ifp, 0, IFF_DRV_RUNNING); device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n", txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); IFDI_WATCHDOG_RESET(ctx); ctx->ifc_watchdog_events++; ctx->ifc_pause_frames = 0; iflib_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_init_locked(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_t ifp = ctx->ifc_ifp; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); /* Set hardware offload abilities */ if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, tx_ip_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); callout_stop(&txq->ift_db_check); CALLOUT_UNLOCK(txq); iflib_netmap_txq_init(ctx, txq); } for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { iflib_netmap_rxq_init(ctx, rxq); } #ifdef INVARIANTS i = if_getdrvflags(ifp); #endif IFDI_INIT(ctx); MPASS(if_getdrvflags(ifp) == i); for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { if (iflib_fl_setup(fl)) { device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); goto done; } } } done: if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); IFDI_INTR_ENABLE(ctx); txq = ctx->ifc_txqs; for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); } static int iflib_media_change(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); int err; CTX_LOCK(ctx); if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) iflib_init_locked(ctx); CTX_UNLOCK(ctx); return (err); } static void iflib_media_status(if_t ifp, struct ifmediareq *ifmr) { if_ctx_t ctx = if_getsoftc(ifp); CTX_LOCK(ctx); IFDI_UPDATE_ADMIN_STATUS(ctx); IFDI_MEDIA_STATUS(ctx, ifmr); CTX_UNLOCK(ctx); } static void iflib_stop(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_dma_info_t di; iflib_fl_t fl; int i, j; /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); DELAY(100000); IFDI_STOP(ctx); DELAY(100000); iflib_debug_reset(); /* Wait for current tx queue users to exit to disarm watchdog timer. 
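 *
 * Ordering matters here: the interface was marked down and interrupts
 * disabled above, the DELAY()s give in-flight interrupt handlers and
 * taskqueues time to drain, and only then are the TX/RX rings and
 * their software descriptors scrubbed below.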
*/ for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { /* make sure all transmitters have completed before proceeding XXX */ /* clean any enqueued buffers */ iflib_ifmp_purge(txq); /* Free any existing tx buffers. */ for (j = 0; j < txq->ift_size; j++) { iflib_txsd_free(ctx, txq, j); } txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; txq->ift_pullups = 0; ifmp_ring_reset_stats(txq->ift_br[0]); for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); } for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { /* make sure all transmitters have completed before proceeding XXX */ for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwrxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); /* also resets the free lists pidx/cidx */ for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) iflib_fl_bufs_free(fl); } } static iflib_rxsd_t rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload) { int flid, cidx; iflib_rxsd_t sd; iflib_fl_t fl; iflib_dma_info_t di; flid = irf->irf_flid; cidx = irf->irf_idx; fl = &rxq->ifr_fl[flid]; fl->ifl_credits--; #if MEMORY_LOGGING fl->ifl_m_dequeued++; if (cltype) fl->ifl_cl_dequeued++; #endif sd = &fl->ifl_sds[cidx]; di = fl->ifl_ifdi; bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* not valid assert if bxe really does SGE from non-contiguous elements */ MPASS(fl->ifl_cidx == cidx); if (unload) bus_dmamap_unload(fl->ifl_desc_tag, sd->ifsd_map); if (__predict_false(++fl->ifl_cidx == fl->ifl_size)) { fl->ifl_cidx = 0; fl->ifl_gen = 0; } /* YES ick */ if (cltype) *cltype = fl->ifl_cltype; return (sd); } static struct mbuf * assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri) { int i, padlen , flags, cltype; struct mbuf *m, *mh, *mt; iflib_rxsd_t sd; caddr_t cl; i = 0; mh = NULL; do { sd = rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE); MPASS(sd->ifsd_cl != NULL); MPASS(sd->ifsd_m != NULL); /* Don't include zero-length frags */ if (ri->iri_frags[i].irf_len == 0) { /* XXX we can save the cluster here, but not the mbuf */ m_init(sd->ifsd_m, M_NOWAIT, MT_DATA, 0); m_free(sd->ifsd_m); sd->ifsd_m = NULL; continue; } m = sd->ifsd_m; if (mh == NULL) { flags = M_PKTHDR|M_EXT; mh = mt = m; padlen = ri->iri_pad; } else { flags = M_EXT; mt->m_next = m; mt = m; /* assuming padding is only on the first fragment */ padlen = 0; } sd->ifsd_m = NULL; cl = sd->ifsd_cl; sd->ifsd_cl = NULL; /* Can these two be made one ? */ m_init(m, M_NOWAIT, MT_DATA, flags); m_cljset(m, cl, cltype); /* * These must follow m_init and m_cljset */ m->m_data += padlen; ri->iri_len -= padlen; m->m_len = ri->iri_frags[i].irf_len; } while (++i < ri->iri_nfrags); return (mh); } /* * Process one software descriptor */ static struct mbuf * iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) { struct mbuf *m; iflib_rxsd_t sd; /* should I merge this back in now that the two paths are basically duplicated? 
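 *
 * Two paths survive for now: a packet that fits in one small fragment
 * (at most IFLIB_RX_COPY_THRESH bytes) is copied into the mbuf so its
 * cluster can be reused in place, while anything larger goes through
 * assemble_segments(), which hands the clusters up zero-copy, one per
 * fragment.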
*/ if (ri->iri_nfrags == 1 && ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) { sd = rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE); m = sd->ifsd_m; sd->ifsd_m = NULL; m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); memcpy(m->m_data, sd->ifsd_cl, ri->iri_len); m->m_len = ri->iri_frags[0].irf_len; } else { m = assemble_segments(rxq, ri); } m->m_pkthdr.len = ri->iri_len; m->m_pkthdr.rcvif = ri->iri_ifp; m->m_flags |= ri->iri_flags; m->m_pkthdr.ether_vtag = ri->iri_vtag; m->m_pkthdr.flowid = ri->iri_flowid; M_HASHTYPE_SET(m, ri->iri_rsstype); m->m_pkthdr.csum_flags = ri->iri_csum_flags; m->m_pkthdr.csum_data = ri->iri_csum_data; return (m); } static bool iflib_rxeof(iflib_rxq_t rxq, int budget) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int avail, i; uint16_t *cidxp; struct if_rxd_info ri; int err, budget_left, rx_bytes, rx_pkts; iflib_fl_t fl; struct ifnet *ifp; int lro_enabled; /* * XXX early demux data packets so that if_input processing only handles * acks in interrupt context */ struct mbuf *m, *mh, *mt; if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &budget)) { return (FALSE); } mh = mt = NULL; MPASS(budget > 0); rx_pkts = rx_bytes = 0; if (sctx->isc_flags & IFLIB_HAS_RXCQ) cidxp = &rxq->ifr_cq_cidx; else cidxp = &rxq->ifr_fl[0].ifl_cidx; if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); DBG_COUNTER_INC(rx_unavail); return (false); } for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { if (__predict_false(!CTX_ACTIVE(ctx))) { DBG_COUNTER_INC(rx_ctx_inactive); break; } /* * Reset client set fields to their default values */ bzero(&ri, sizeof(ri)); ri.iri_qsidx = rxq->ifr_id; ri.iri_cidx = *cidxp; ri.iri_ifp = ctx->ifc_ifp; ri.iri_frags = rxq->ifr_frags; err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); /* in lieu of handling correctly - make sure it isn't being unhandled */ MPASS(err == 0); if (sctx->isc_flags & IFLIB_HAS_RXCQ) { *cidxp = ri.iri_cidx; /* Update our consumer index */ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; rxq->ifr_cq_gen = 0; } /* was this only a completion queue message? 
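 *
 * (Some devices interleave pure status entries with packet
 * completions on the CQ; such an entry consumes a CQ slot but
 * carries no fragments, so there is nothing to pass up the stack.)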
*/ if (__predict_false(ri.iri_nfrags == 0)) continue; } MPASS(ri.iri_nfrags != 0); MPASS(ri.iri_len != 0); /* will advance the cidx on the corresponding free lists */ m = iflib_rxd_pkt_get(rxq, &ri); if (avail == 0 && budget_left) avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); if (__predict_false(m == NULL)) { DBG_COUNTER_INC(rx_mbuf_null); continue; } /* imm_pkt: -- cxgb */ if (mh == NULL) mh = mt = m; else { mt->m_nextpkt = m; mt = m; } } /* make sure that we can refill faster than drain */ for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); ifp = ctx->ifc_ifp; lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); while (mh != NULL) { m = mh; mh = mh->m_nextpkt; m->m_nextpkt = NULL; rx_bytes += m->m_pkthdr.len; rx_pkts++; #if defined(INET6) || defined(INET) if (lro_enabled && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) continue; #endif DBG_COUNTER_INC(rx_if_input); ifp->if_input(ifp, m); } if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); /* * Flush any outstanding LRO work */ #if defined(INET6) || defined(INET) tcp_lro_flush_all(&rxq->ifr_lc); #endif if (avail) return true; return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); } #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) #define TXQ_MAX_DB_DEFERRED(size) (size >> 5) #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) static __inline void iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring) { uint32_t dbval; if (ring || txq->ift_db_pending >= TXQ_MAX_DB_DEFERRED(txq->ift_size)) { /* the lock will only ever be contended in the !min_latency case */ if (!TXDB_TRYLOCK(txq)) return; dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); txq->ift_db_pending = txq->ift_npending = 0; TXDB_UNLOCK(txq); } } static void iflib_txd_deferred_db_check(void * arg) { iflib_txq_t txq = arg; /* simple non-zero boolean so use bitwise OR */ if ((txq->ift_db_pending | txq->ift_npending) && txq->ift_db_pending >= txq->ift_db_pending_queued) iflib_txd_db_check(txq->ift_ctx, txq, TRUE); txq->ift_db_pending_queued = 0; if (ifmp_ring_is_stalled(txq->ift_br[0])) iflib_txq_check_drain(txq, 4); } #ifdef PKT_DEBUG static void print_pkt(if_pkt_info_t pi) { printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); } #endif #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) static int iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) { if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; struct ether_vlan_header *eh; struct mbuf *m, *n; n = m = *mp; if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && M_WRITABLE(m) == 0) { if ((m = m_dup(m, M_NOWAIT)) == NULL) { return (ENOMEM); } else { m_freem(*mp); n = *mp = m; } } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
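 *
 * For reference, the two frame layouts distinguished below are:
 *
 *	untagged: dhost[6] shost[6] ether_type[2] payload
 *	tagged:   dhost[6] shost[6] 0x8100[2] tag[2] ether_type[2] payload
 *
 * so a VLAN tag adds ETHER_VLAN_ENCAP_LEN (4) bytes before the real
 * ether_type.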
*/ if (__predict_false(m->m_len < sizeof(*eh))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) return (ENOMEM); } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { pi->ipi_etype = ntohs(eh->evl_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { pi->ipi_etype = ntohs(eh->evl_encap_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN; } switch (pi->ipi_etype) { #ifdef INET case ETHERTYPE_IP: { struct ip *ip = NULL; struct tcphdr *th = NULL; int minthlen; minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); if (__predict_false(m->m_len < minthlen)) { /* * if this code bloat is causing too much of a hit * move it to a separate function and mark it noinline */ if (m->m_len == pi->ipi_ehdrlen) { n = m->m_next; MPASS(n); if (n->m_len >= sizeof(*ip)) { ip = (struct ip *)n->m_data; if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); } } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } } else { ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } pi->ipi_ip_hlen = ip->ip_hl << 2; pi->ipi_ipproto = ip->ip_p; pi->ipi_flags |= IPI_TX_IPV4; if (pi->ipi_csum_flags & CSUM_IP) ip->ip_sum = 0; if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(th == NULL)) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) return (ENOMEM); th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (IS_TSO4(pi)) { if (__predict_false(ip->ip_p != IPPROTO_TCP)) return (ENXIO); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { ip->ip_sum = 0; ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); } } break; } #endif #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); struct tcphdr *th; pi->ipi_ip_hlen = sizeof(struct ip6_hdr); if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) return (ENOMEM); } th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); /* XXX-BZ this will go badly in case of ext hdrs. */ pi->ipi_ipproto = ip6->ip6_nxt; pi->ipi_flags |= IPI_TX_IPV6; if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) return (ENOMEM); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; } if (IS_TSO6(pi)) { if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) return (ENXIO); /* * The corresponding flag is set by the stack in the IPv4 * TSO case, but not in IPv6 (at least in FreeBSD 10.2). 
* So, set it here because the rest of the flow requires it. */ pi->ipi_csum_flags |= CSUM_TCP_IPV6; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; } break; } #endif default: pi->ipi_csum_flags &= ~CSUM_OFFLOAD; pi->ipi_ip_hlen = 0; break; } *mp = m; return (0); } static __noinline struct mbuf * collapse_pkthdr(struct mbuf *m0) { struct mbuf *m, *m_next, *tmp; m = m0; m_next = m->m_next; while (m_next != NULL && m_next->m_len == 0) { /* save the successor before freeing, then unlink and free */ tmp = m_next->m_next; m_next->m_next = NULL; m_free(m_next); m_next = tmp; } m = m0; m->m_next = m_next; if ((m_next->m_flags & M_EXT) == 0) { m = m_defrag(m, M_NOWAIT); } else { tmp = m_next->m_next; memcpy(m_next, m, MPKTHSIZE); m = m_next; m->m_next = tmp; } return (m); } /* * If dodgy hardware rejects the scatter gather chain we've handed it * we'll need to remove the mbuf chain from ifsg_m[] before we can add the * m_defrag'd mbufs */ static __noinline struct mbuf * iflib_remove_mbuf(iflib_txq_t txq) { int ntxd, i, pidx; struct mbuf *m, *mh, **ifsd_m; pidx = txq->ift_pidx; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; mh = m = ifsd_m[pidx]; ifsd_m[pidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif i = 1; while (m) { ifsd_m[(pidx + i) & (ntxd - 1)] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif m = m->m_next; i++; } return (mh); } static int iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs, int max_segs, int flags) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; int i, next, pidx, mask, err, maxsegsz, ntxd, count; struct mbuf *m, *tmp, **ifsd_m, **mp; m = *m0; /* * Please don't ever do this */ if (__predict_false(m->m_len == 0)) *m0 = m = collapse_pkthdr(m); ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; pidx = txq->ift_pidx; if (map != NULL) { uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags; err = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nsegs, BUS_DMA_NOWAIT); if (err) return (err); ifsd_flags[pidx] |= TX_SW_DESC_MAPPED; i = 0; next = pidx; mask = (txq->ift_size-1); m = *m0; do { mp = &ifsd_m[next]; *mp = m; m = m->m_next; if (__predict_false((*mp)->m_len == 0)) { m_free(*mp); *mp = NULL; } else next = (pidx + ++i) & (ntxd-1); /* store successive chain mbufs in successive slots */ } while (m != NULL); } else { int buflen, sgsize, max_sgsize; vm_offset_t vaddr; vm_paddr_t curaddr; count = i = 0; maxsegsz = sctx->isc_tx_maxsize; m = *m0; do { if (__predict_false(m->m_len <= 0)) { tmp = m; m = m->m_next; tmp->m_next = NULL; m_free(tmp); continue; } buflen = m->m_len; vaddr = (vm_offset_t)m->m_data; /* * see if we can't be smarter about physically * contiguous mappings */ next = (pidx + count) & (ntxd-1); MPASS(ifsd_m[next] == NULL); #if MEMORY_LOGGING txq->ift_enqueued++; #endif ifsd_m[next] = m; while (buflen > 0) { max_sgsize = MIN(buflen, maxsegsz); curaddr = pmap_kextract(vaddr); sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); sgsize = MIN(sgsize, max_sgsize); segs[i].ds_addr = curaddr; segs[i].ds_len = sgsize; vaddr += sgsize; buflen -= sgsize; i++; if (i >= max_segs) goto err; } count++; tmp = m; m = m->m_next; } while (m != NULL); *nsegs = i; } return (0); err: *m0 = iflib_remove_mbuf(txq); return (EFBIG); } static int iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; bus_dma_segment_t *segs; struct mbuf *m_head; bus_dmamap_t map; struct if_pkt_info pi; int remap = 0; int err, nsegs, ndesc, max_segs, pidx, cidx,
next, ntxd; bus_dma_tag_t desc_tag; segs = txq->ift_segs; ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; segs = txq->ift_segs; ntxd = txq->ift_size; m_head = *m_headp; map = NULL; /* * If we're doing TSO the next descriptor to clean may be quite far ahead */ cidx = txq->ift_cidx; pidx = txq->ift_pidx; next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); /* prefetch the next cache line of mbuf pointers and flags */ prefetch(&txq->ift_sds.ifsd_m[next]); if (txq->ift_sds.ifsd_map != NULL) { prefetch(&txq->ift_sds.ifsd_map[next]); map = txq->ift_sds.ifsd_map[pidx]; next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); prefetch(&txq->ift_sds.ifsd_flags[next]); } if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { desc_tag = txq->ift_tso_desc_tag; max_segs = scctx->isc_tx_tso_segments_max; } else { desc_tag = txq->ift_desc_tag; max_segs = scctx->isc_tx_nsegments; } m_head = *m_headp; bzero(&pi, sizeof(pi)); pi.ipi_len = m_head->m_pkthdr.len; pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0; pi.ipi_pidx = pidx; pi.ipi_qsidx = txq->ift_id; /* deliberate bitwise OR to make one condition */ if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) return (err); m_head = *m_headp; } retry: err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); defrag: if (__predict_false(err)) { switch (err) { case EFBIG: /* try collapse once and defrag once */ if (remap == 0) m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); if (remap == 1) m_head = m_defrag(*m_headp, M_NOWAIT); remap++; if (__predict_false(m_head == NULL)) goto defrag_failed; txq->ift_mbuf_defrag++; *m_headp = m_head; goto retry; break; case ENOMEM: txq->ift_no_tx_dma_setup++; break; default: txq->ift_no_tx_dma_setup++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; break; } txq->ift_map_failed++; DBG_COUNTER_INC(encap_load_mbuf_fail); return (err); } /* * XXX assumes a 1 to 1 relationship between segments and * descriptors - this does not hold true on all drivers, e.g. 
* cxgb */ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { txq->ift_no_desc_avail++; if (map != NULL) bus_dmamap_unload(desc_tag, map); DBG_COUNTER_INC(encap_txq_avail_fail); if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) GROUPTASK_ENQUEUE(&txq->ift_task); return (ENOBUFS); } pi.ipi_segs = segs; pi.ipi_nsegs = nsegs; MPASS(pidx >= 0 && pidx < txq->ift_size); #ifdef PKT_DEBUG print_pkt(&pi); #endif if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); DBG_COUNTER_INC(tx_encap); MPASS(pi.ipi_new_pidx >= 0 && pi.ipi_new_pidx < txq->ift_size); ndesc = pi.ipi_new_pidx - pi.ipi_pidx; if (pi.ipi_new_pidx < pi.ipi_pidx) { ndesc += txq->ift_size; txq->ift_gen = 1; } /* * drivers can need as many as * two sentinels */ MPASS(ndesc <= pi.ipi_nsegs + 2); MPASS(pi.ipi_new_pidx != pidx); MPASS(ndesc > 0); txq->ift_in_use += ndesc; /* * We update the last software descriptor again here because there may * be a sentinel and/or there may be more mbufs than segments */ txq->ift_pidx = pi.ipi_new_pidx; txq->ift_npending += pi.ipi_ndescs; } else if (__predict_false(err == EFBIG && remap < 2)) { *m_headp = m_head = iflib_remove_mbuf(txq); remap = 1; txq->ift_txd_encap_efbig++; goto defrag; } else DBG_COUNTER_INC(encap_txd_encap_fail); return (err); defrag_failed: txq->ift_mbuf_defrag_failed++; txq->ift_map_failed++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; return (ENOMEM); } /* forward compatibility for cxgb */ #define FIRST_QSET(ctx) 0 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) /* if there are more than TXQ_MIN_OCCUPANCY packets pending we consider deferring * doorbell writes * * ORing with 2 assures that min occupancy is never less than 2 without any conditional logic */ #define TXQ_MIN_OCCUPANCY(size) ((size >> 6)| 0x2) static inline int iflib_txq_min_occupancy(iflib_txq_t txq) { if_ctx_t ctx; ctx = txq->ift_ctx; return (get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen) < TXQ_MIN_OCCUPANCY(txq->ift_size) + MAX_TX_DESC(ctx)); } static void iflib_tx_desc_free(iflib_txq_t txq, int n) { int hasmap; uint32_t qsize, cidx, mask, gen; struct mbuf *m, **ifsd_m; uint8_t *ifsd_flags; bus_dmamap_t *ifsd_map; cidx = txq->ift_cidx; gen = txq->ift_gen; qsize = txq->ift_size; mask = qsize-1; hasmap = txq->ift_sds.ifsd_map != NULL; ifsd_flags = txq->ift_sds.ifsd_flags; ifsd_m = txq->ift_sds.ifsd_m; ifsd_map = txq->ift_sds.ifsd_map; while (n--) { prefetch(ifsd_m[(cidx + 3) & mask]); prefetch(ifsd_m[(cidx + 4) & mask]); if (ifsd_m[cidx] != NULL) { prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { /* * does it matter if it's not the TSO tag? 
If so we'll * have to add the type to flags */ bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; } if ((m = ifsd_m[cidx]) != NULL) { /* XXX we don't support any drivers that batch packets yet */ MPASS(m->m_nextpkt == NULL); m_free(m); ifsd_m[cidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif DBG_COUNTER_INC(tx_frees); } } if (__predict_false(++cidx == qsize)) { cidx = 0; gen = 0; } } txq->ift_cidx = cidx; txq->ift_gen = gen; } static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) { int reclaim; if_ctx_t ctx = txq->ift_ctx; KASSERT(thresh >= 0, ("invalid threshold to reclaim")); MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); /* * Need a rate-limiting check so that this isn't called every time */ iflib_tx_credits_update(ctx, txq); reclaim = DESC_RECLAIMABLE(txq); if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { #ifdef INVARIANTS if (iflib_verbose_debug) { printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, reclaim, thresh); } #endif return (0); } iflib_tx_desc_free(txq, reclaim); txq->ift_cleaned += reclaim; txq->ift_in_use -= reclaim; if (txq->ift_active == FALSE) txq->ift_active = TRUE; return (reclaim); } static struct mbuf ** _ring_peek_one(struct ifmp_ring *r, int cidx, int offset) { return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (r->size-1)])); } static void iflib_txq_check_drain(iflib_txq_t txq, int budget) { ifmp_ring_check_drainage(txq->ift_br[0], budget); } static uint32_t iflib_txq_can_drain(struct ifmp_ring *r) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, txq->ift_cidx_processed, false)); } static uint32_t iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; if_t ifp = ctx->ifc_ifp; struct mbuf **mp, *m; int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail, err, in_use_prev, desc_used; if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(txq_drain_notready); return (0); } avail = IDXDIFF(pidx, cidx, r->size); if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { DBG_COUNTER_INC(txq_drain_flushing); for (i = 0; i < avail; i++) { m_free(r->items[(cidx + i) & (r->size-1)]); r->items[(cidx + i) & (r->size-1)] = NULL; } return (avail); } iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); callout_stop(&txq->ift_db_check); CALLOUT_UNLOCK(txq); DBG_COUNTER_INC(txq_drain_oactive); return (0); } consumed = mcast_sent = bytes_sent = pkt_sent = 0; count = MIN(avail, TX_BATCH_SIZE); #ifdef INVARIANTS if (iflib_verbose_debug) printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, avail, ctx->ifc_flags, TXQ_AVAIL(txq)); #endif for (desc_used = i = 0; i < count && TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2; i++) { mp = _ring_peek_one(r, cidx, i); MPASS(mp != NULL && *mp != NULL); in_use_prev = txq->ift_in_use; if ((err = iflib_encap(txq, mp)) == ENOBUFS) { DBG_COUNTER_INC(txq_drain_encapfail); /* no room - bail out */ break; } consumed++; if (err) { DBG_COUNTER_INC(txq_drain_encapfail); /* we can't send this packet - skip it */ continue; } 
pkt_sent++; m = *mp; DBG_COUNTER_INC(tx_sent); bytes_sent += m->m_pkthdr.len; if (m->m_flags & M_MCAST) mcast_sent++; txq->ift_db_pending += (txq->ift_in_use - in_use_prev); desc_used += (txq->ift_in_use - in_use_prev); iflib_txd_db_check(ctx, txq, FALSE); ETHER_BPF_MTAP(ifp, m); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) break; if (desc_used >= TXQ_MAX_DB_CONSUMED(txq->ift_size)) break; } if ((iflib_min_tx_latency || iflib_txq_min_occupancy(txq)) && txq->ift_db_pending) iflib_txd_db_check(ctx, txq, TRUE); else if ((txq->ift_db_pending || TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2) && (callout_pending(&txq->ift_db_check) == 0)) { txq->ift_db_pending_queued = txq->ift_db_pending; callout_reset_on(&txq->ift_db_check, 1, iflib_txd_deferred_db_check, txq, txq->ift_db_check.c_cpu); } if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); if (mcast_sent) if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); #ifdef INVARIANTS if (iflib_verbose_debug) printf("consumed=%d\n", consumed); #endif return (consumed); } static uint32_t iflib_txq_drain_always(struct ifmp_ring *r) { return (1); } static uint32_t iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { int i, avail; struct mbuf **mp; iflib_txq_t txq; txq = r->cookie; txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); callout_stop(&txq->ift_db_check); CALLOUT_UNLOCK(txq); avail = IDXDIFF(pidx, cidx, r->size); for (i = 0; i < avail; i++) { mp = _ring_peek_one(r, cidx, i); m_freem(*mp); } MPASS(ifmp_ring_is_stalled(r) == 0); return (avail); } static void iflib_ifmp_purge(iflib_txq_t txq) { struct ifmp_ring *r; r = txq->ift_br[0]; r->drain = iflib_txq_drain_free; r->can_drain = iflib_txq_drain_always; ifmp_ring_check_drainage(r, r->size); r->drain = iflib_txq_drain; r->can_drain = iflib_txq_can_drain; } static void _task_fn_tx(void *context) { iflib_txq_t txq = context; if_ctx_t ctx = txq->ift_ctx; #ifdef IFLIB_DIAGNOSTICS txq->ift_cpu_exec_count[curcpu]++; #endif if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE); } static void _task_fn_rx(void *context) { iflib_rxq_t rxq = context; if_ctx_t ctx = rxq->ifr_ctx; bool more; int rc; #ifdef IFLIB_DIAGNOSTICS rxq->ifr_cpu_exec_count[curcpu]++; #endif DBG_COUNTER_INC(task_fn_rxs); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; if ((more = iflib_rxeof(rxq, 16 /* XXX */)) == false) { if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else { DBG_COUNTER_INC(rx_intr_enables); rc = IFDI_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); } } if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; if (more) GROUPTASK_ENQUEUE(&rxq->ifr_task); } static void _task_fn_admin(void *context) { if_ctx_t ctx = context; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; iflib_txq_t txq; int i; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; CTX_LOCK(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); } IFDI_UPDATE_ADMIN_STATUS(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); IFDI_LINK_INTR_ENABLE(ctx); CTX_UNLOCK(ctx); if (LINK_ACTIVE(ctx) == 0) return; for (txq = 
ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); } static void _task_fn_iov(void *context) { if_ctx_t ctx = context; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; CTX_LOCK(ctx); IFDI_VFLR_HANDLE(ctx); CTX_UNLOCK(ctx); } static int iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { int err; if_int_delay_info_t info; if_ctx_t ctx; info = (if_int_delay_info_t)arg1; ctx = info->iidi_ctx; info->iidi_req = req; info->iidi_oidp = oidp; CTX_LOCK(ctx); err = IFDI_SYSCTL_INT_DELAY(ctx, info); CTX_UNLOCK(ctx); return (err); } /********************************************************************* * * IFNET FUNCTIONS * **********************************************************************/ static void iflib_if_init_locked(if_ctx_t ctx) { iflib_stop(ctx); iflib_init_locked(ctx); } static void iflib_if_init(void *arg) { if_ctx_t ctx = arg; CTX_LOCK(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static int iflib_if_transmit(if_t ifp, struct mbuf *m) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq; int err, qidx; if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(tx_frees); m_freem(m); return (ENOBUFS); } MPASS(m->m_nextpkt == NULL); qidx = 0; if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m)) qidx = QIDX(ctx, m); /* * XXX calculate buf_ring based on flowid (divvy up bits?) */ txq = &ctx->ifc_txqs[qidx]; #ifdef DRIVER_BACKPRESSURE if (txq->ift_closed) { while (m != NULL) { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); m = next; } return (ENOBUFS); } #endif #ifdef notyet qidx = count = 0; mp = marr; next = m; do { count++; next = next->m_nextpkt; } while (next != NULL); if (count > nitems(marr)) if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { /* XXX check nextpkt */ m_freem(m); /* XXX simplify for now */ DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } for (next = m, i = 0; next != NULL; i++) { mp[i] = next; next = next->m_nextpkt; mp[i]->m_nextpkt = NULL; } #endif DBG_COUNTER_INC(tx_seen); err = ifmp_ring_enqueue(txq->ift_br[0], (void **)&m, 1, TX_BATCH_SIZE); if (err) { GROUPTASK_ENQUEUE(&txq->ift_task); /* support forthcoming later */ #ifdef DRIVER_BACKPRESSURE txq->ift_closed = TRUE; #endif ifmp_ring_check_drainage(txq->ift_br[0], TX_BATCH_SIZE); m_freem(m); } else if (TXQ_AVAIL(txq) < (txq->ift_size >> 1)) { GROUPTASK_ENQUEUE(&txq->ift_task); } return (err); } static void iflib_if_qflush(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq = ctx->ifc_txqs; int i; CTX_LOCK(ctx); ctx->ifc_flags |= IFC_QFLUSH; CTX_UNLOCK(ctx); for (i = 0; i < NTXQSETS(ctx); i++, txq++) while (!(ifmp_ring_is_idle(txq->ift_br[0]) || ifmp_ring_is_stalled(txq->ift_br[0]))) iflib_txq_check_drain(txq, 0); CTX_LOCK(ctx); ctx->ifc_flags &= ~IFC_QFLUSH; CTX_UNLOCK(ctx); if_qflush(ifp); } #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | \ IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO) static int iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) { if_ctx_t ctx = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int err = 0, reinit = 0, bits; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** 
Calling init results in link renegotiation,
	** so we avoid doing it when possible.
	*/
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				reinit = 1;
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			err = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		CTX_LOCK(ctx);
		if (ifr->ifr_mtu == if_getmtu(ifp)) {
			CTX_UNLOCK(ctx);
			break;
		}
		bits = if_getdrvflags(ifp);
		/* stop the driver and free any clusters before proceeding */
		iflib_stop(ctx);

		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
				ctx->ifc_flags |= IFC_MULTISEG;
			else
				ctx->ifc_flags &= ~IFC_MULTISEG;
			err = if_setmtu(ifp, ifr->ifr_mtu);
		}
		iflib_init_locked(ctx);
		if_setdrvflags(ifp, bits);
		CTX_UNLOCK(ctx);
		break;
	case SIOCSIFFLAGS:
		CTX_LOCK(ctx);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
				}
			} else
				reinit = 1;
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			iflib_stop(ctx);
		}
		ctx->ifc_if_flags = if_getflags(ifp);
		CTX_UNLOCK(ctx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			CTX_LOCK(ctx);
			IFDI_INTR_DISABLE(ctx);
			IFDI_MULTI_SET(ctx);
			IFDI_INTR_ENABLE(ctx);
			CTX_UNLOCK(ctx);
		}
		break;
	case SIOCSIFMEDIA:
		CTX_LOCK(ctx);
		IFDI_MEDIA_SET(ctx);
		CTX_UNLOCK(ctx);
		/* falls thru */
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
		break;
	case SIOCGI2C:
	{
		struct ifi2creq i2c;

		err = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (err != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			err = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			err = EINVAL;
			break;
		}

		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
			err = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
	{
		int mask, setmask;

		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		setmask = 0;
#ifdef TCP_OFFLOAD
		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
#endif
		setmask |= (mask & IFCAP_FLAGS);

		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
			setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
		if ((mask & IFCAP_WOL) &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
			setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
		if_vlancap(ifp);
		/*
		 * want to ensure that traffic has stopped before we change any of the flags
		 */
		if (setmask) {
			CTX_LOCK(ctx);
			bits = if_getdrvflags(ifp);
			if (bits & IFF_DRV_RUNNING)
				iflib_stop(ctx);
			if_togglecapenable(ifp, setmask);
			if (bits & IFF_DRV_RUNNING)
				iflib_init_locked(ctx);
			if_setdrvflags(ifp, bits);
			CTX_UNLOCK(ctx);
		}
		break;
	}
	case SIOCGPRIVATE_0:
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		CTX_LOCK(ctx);
		err = IFDI_PRIV_IOCTL(ctx, command, data);
		CTX_UNLOCK(ctx);
		break;
	default:
		err = ether_ioctl(ifp, command, data);
		break;
	}
	if (reinit)
		iflib_if_init(ctx);
	return (err);
}

static uint64_t
iflib_if_get_counter(if_t ifp, ift_counter cnt)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	return (IFDI_GET_COUNTER(ctx, cnt));
}

/*********************************************************************
 *
 *  OTHER FUNCTIONS EXPORTED TO THE STACK
 *
 **********************************************************************/

static void
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	if ((void *)ctx != arg)
		return;
	if ((vtag == 0) || (vtag > 4095))
		return;

	CTX_LOCK(ctx);
	IFDI_VLAN_REGISTER(ctx, vtag);
	/* Re-init to load the changes */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
iflib_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_UNREGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_led_func(void *arg, int onoff) { if_ctx_t ctx = arg; CTX_LOCK(ctx); IFDI_LED_FUNC(ctx, onoff); CTX_UNLOCK(ctx); } /********************************************************************* * * BUS FUNCTION DEFINITIONS * **********************************************************************/ int iflib_device_probe(device_t dev) { pci_vendor_info_t *ent; uint16_t pci_vendor_id, pci_device_id; uint16_t pci_subvendor_id, pci_subdevice_id; uint16_t pci_rev_id; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); pci_rev_id = pci_get_revid(dev); if (sctx->isc_parse_devinfo != NULL) sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); ent = sctx->isc_vendor_info; while (ent->pvi_vendor_id != 0) { if (pci_vendor_id != ent->pvi_vendor_id) { ent++; continue; } if ((pci_device_id == ent->pvi_device_id) && ((pci_subvendor_id == ent->pvi_subvendor_id) || (ent->pvi_subvendor_id == 0)) && ((pci_subdevice_id == ent->pvi_subdevice_id) || (ent->pvi_subdevice_id == 0)) && ((pci_rev_id == ent->pvi_rev_id) || (ent->pvi_rev_id == 0))) { device_set_desc_copy(dev, ent->pvi_name); /* this needs to be changed to zero if the bus probing code * ever stops re-probing on best match because the sctx * may have its values over written by register calls * in subsequent probes */ return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } int iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) { int err, rid, msix, msix_bar; if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; int i; uint16_t main_txq; uint16_t main_rxq; ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); if (sc == NULL) { sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); device_set_softc(dev, ctx); ctx->ifc_flags |= IFC_SC_ALLOCATED; } ctx->ifc_sctx = sctx; ctx->ifc_dev = dev; ctx->ifc_softc = sc; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "iflib_register failed %d\n", err); return (err); } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; /* * XXX sanity check that ntxd & nrxd are a power of 2 */ if (ctx->ifc_sysctl_ntxqs != 0) scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; if (ctx->ifc_sysctl_nrxqs != 0) scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; for (i = 0; i < sctx->isc_ntxqs; i++) { if (ctx->ifc_sysctl_ntxds[i] != 0) scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; else scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (ctx->ifc_sysctl_nrxds[i] != 0) scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; else scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; } if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { 
device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; } if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; } } if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); return (err); } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; #ifdef INVARIANTS MPASS(scctx->isc_capenable); if (scctx->isc_capenable & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capenable); if_setcapenable(ifp, scctx->isc_capenable); if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; #ifdef ACPI_DMAR if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL) ctx->ifc_flags |= IFC_DMAR; #endif msix_bar = scctx->isc_msix_bar; if(sctx->isc_flags & IFLIB_HAS_TXCQ) main_txq = 1; else main_txq = 0; if(sctx->isc_flags & IFLIB_HAS_RXCQ) main_rxq = 1; else main_rxq = 0; /* XXX change for per-queue sizes */ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); for (i = 0; i < sctx->isc_nrxqs; i++) { if (!powerof2(scctx->isc_nrxd[i])) { /* round down instead? */ device_printf(dev, "# rx descriptors must be a power of 2\n"); err = EINVAL; goto fail; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (!powerof2(scctx->isc_ntxd[i])) { device_printf(dev, "# tx descriptors must be a power of 2"); err = EINVAL; goto fail; } } if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); /* * Protect the stack against modern hardware */ if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX) scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX; /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max; ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max; ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max; if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); /* ** Now setup MSI or MSI/X, should ** return us the number of supported ** vectors. 
(Will be 1 for MSI)
	*/
	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
		msix = scctx->isc_vectors;
	} else if (scctx->isc_msix_bar != 0)
		msix = iflib_msix_init(ctx);
	else {
		scctx->isc_vectors = 1;
		scctx->isc_ntxqsets = 1;
		scctx->isc_nrxqsets = 1;
		scctx->isc_intr = IFLIB_INTR_LEGACY;
		msix = 0;
	}
	/* Get memory for the station queues */
	if ((err = iflib_queues_alloc(ctx))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		goto fail;
	}

	if ((err = iflib_qset_structures_setup(ctx))) {
		device_printf(dev, "qset structure setup failed %d\n", err);
		goto fail_queues;
	}
-
+	/*
+	 * Group taskqueues aren't properly set up until SMP is started,
+	 * so we disable interrupts until we can handle them post
+	 * SI_SUB_SMP.
+	 *
+	 * XXX: disabling interrupts doesn't actually work, at least for
+	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
+	 * we do null handling and depend on this not causing too large an
+	 * interrupt storm.
+	 */
	IFDI_INTR_DISABLE(ctx);
	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
		goto fail_intr_free;
	}
	if (msix <= 1) {
		rid = 0;
		if (scctx->isc_intr == IFLIB_INTR_MSI) {
			MPASS(msix == 1);
			rid = 1;
		}
		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
			goto fail_intr_free;
		}
	}
	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
		goto fail_detach;
	}
	if ((err = iflib_netmap_attach(ctx))) {
		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
		goto fail_detach;
	}
	*ctxp = ctx;

	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
	iflib_add_device_sysctl_post(ctx);
	ctx->ifc_flags |= IFC_INIT_DONE;
	return (0);
fail_detach:
	ether_ifdetach(ctx->ifc_ifp);
fail_intr_free:
	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
		pci_release_msi(ctx->ifc_dev);
fail_queues:
	/* XXX free queues */
fail:
	IFDI_DETACH(ctx);
	return (err);
}

int
iflib_device_attach(device_t dev)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;

	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	pci_enable_busmaster(dev);

	return (iflib_device_register(dev, NULL, sctx, &ctx));
}

int
iflib_device_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	device_t dev = ctx->ifc_dev;
	int i;
	struct taskqgroup *tqg;

	/* Make sure VLANS are not using driver */
	if (if_vlantrunkinuse(ifp)) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	CTX_LOCK(ctx);
	ctx->ifc_in_detach = 1;
	iflib_stop(ctx);
	CTX_UNLOCK(ctx);

	/* Unregister VLAN events */
	if (ctx->ifc_vlan_attach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
	if (ctx->ifc_vlan_detach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);

	iflib_netmap_detach(ifp);
	ether_ifdetach(ifp);
	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
	CTX_LOCK_DESTROY(ctx);
	if (ctx->ifc_led_dev != NULL)
		led_destroy(ctx->ifc_led_dev);
	/* XXX drain any dependent tasks */
	tqg = qgroup_if_io_tqg;
	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
		callout_drain(&txq->ift_timer);
		callout_drain(&txq->ift_db_check);
		if (txq->ift_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &txq->ift_task);
	}
	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
		if (rxq->ifr_task.gt_uniq != NULL)
			taskqgroup_detach(tqg,
&rxq->ifr_task); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_vflr_task); IFDI_DETACH(ctx); device_set_softc(ctx->ifc_dev, NULL); if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { pci_release_msi(dev); } if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { iflib_irq_free(ctx, &ctx->ifc_legacy_irq); } if (ctx->ifc_msix_mem != NULL) { bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } bus_generic_detach(dev); if_free(ifp); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (0); } int iflib_device_detach(device_t dev) { if_ctx_t ctx = device_get_softc(dev); return (iflib_device_deregister(ctx)); } int iflib_device_suspend(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SUSPEND(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_shutdown(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SHUTDOWN(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_resume(device_t dev) { if_ctx_t ctx = device_get_softc(dev); iflib_txq_t txq = ctx->ifc_txqs; CTX_LOCK(ctx); IFDI_RESUME(ctx); iflib_init_locked(ctx); CTX_UNLOCK(ctx); for (int i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); return (bus_generic_resume(dev)); } int iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_INIT(ctx, num_vfs, params); CTX_UNLOCK(ctx); return (error); } void iflib_device_iov_uninit(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_IOV_UNINIT(ctx); CTX_UNLOCK(ctx); } int iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_VF_ADD(ctx, vfnum, params); CTX_UNLOCK(ctx); return (error); } /********************************************************************* * * MODULE FUNCTION DEFINITIONS * **********************************************************************/ /* * - Start a fast taskqueue thread for each core * - Start a taskqueue for control operations */ static int iflib_module_init(void) { return (0); } static int iflib_module_event_handler(module_t mod, int what, void *arg) { int err; switch (what) { case MOD_LOAD: if ((err = iflib_module_init()) != 0) return (err); break; case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } return (0); } /********************************************************************* * * PUBLIC FUNCTION DEFINITIONS * ordered as in iflib.h * **********************************************************************/ static void _iflib_assert(if_shared_ctx_t sctx) { MPASS(sctx->isc_tx_maxsize); MPASS(sctx->isc_tx_maxsegsize); MPASS(sctx->isc_rx_maxsize); MPASS(sctx->isc_rx_nsegments); MPASS(sctx->isc_rx_maxsegsize); MPASS(sctx->isc_nrxd_min[0]); MPASS(sctx->isc_nrxd_max[0]); MPASS(sctx->isc_nrxd_default[0]); MPASS(sctx->isc_ntxd_min[0]); MPASS(sctx->isc_ntxd_max[0]); MPASS(sctx->isc_ntxd_default[0]); } static void _iflib_pre_assert(if_softc_ctx_t scctx) { MPASS(scctx->isc_txrx->ift_txd_encap); MPASS(scctx->isc_txrx->ift_txd_flush); MPASS(scctx->isc_txrx->ift_txd_credits_update); 
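	/*
	 * These checks run right after IFDI_ATTACH_PRE: the TX ops above
	 * and the RX ops below must have been filled in by the driver's
	 * attach-pre routine, since iflib dispatches to them directly.
	 */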
MPASS(scctx->isc_txrx->ift_rxd_available); MPASS(scctx->isc_txrx->ift_rxd_pkt_get); MPASS(scctx->isc_txrx->ift_rxd_refill); MPASS(scctx->isc_txrx->ift_rxd_flush); } static int iflib_register(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; driver_t *driver = sctx->isc_driver; device_t dev = ctx->ifc_dev; if_t ifp; _iflib_assert(sctx); CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (ENOMEM); } /* * Initialize our context's device specific methods */ kobj_init((kobj_t) ctx, (kobj_class_t) driver); kobj_class_compile((kobj_class_t) driver); driver->refs++; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setsoftc(ifp, ctx); if_setdev(ifp, dev); if_setinitfn(ifp, iflib_if_init); if_setioctlfn(ifp, iflib_if_ioctl); if_settransmitfn(ifp, iflib_if_transmit); if_setqflushfn(ifp, iflib_if_qflush); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); ctx->ifc_vlan_attach_event = EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, EVENTHANDLER_PRI_FIRST); ctx->ifc_vlan_detach_event = EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, EVENTHANDLER_PRI_FIRST); ifmedia_init(&ctx->ifc_media, IFM_IMASK, iflib_media_change, iflib_media_status); return (0); } static int iflib_queues_alloc(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int nrxqsets = scctx->isc_nrxqsets; int ntxqsets = scctx->isc_ntxqsets; iflib_txq_t txq; iflib_rxq_t rxq; iflib_fl_t fl = NULL; int i, j, cpu, err, txconf, rxconf; iflib_dma_info_t ifdip; uint32_t *rxqsizes = scctx->isc_rxqsizes; uint32_t *txqsizes = scctx->isc_txqsizes; uint8_t nrxqs = sctx->isc_nrxqs; uint8_t ntxqs = sctx->isc_ntxqs; int nfree_lists = sctx->isc_nfl ? 
sctx->isc_nfl : 1;
	caddr_t *vaddrs;
	uint64_t *paddrs;
	struct ifmp_ring **brscp;
	int nbuf_rings = 1; /* XXX determine dynamically */

	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));

	brscp = NULL;
	txq = NULL;
	rxq = NULL;

	/* Allocate the TX ring struct memory */
	if (!(txq =
	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Now allocate the RX */
	if (!(rxq =
	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		err = ENOMEM;
		goto rx_fail;
	}
	if (!(brscp = malloc(sizeof(void *) * nbuf_rings * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate buf_ring_sc * memory\n");
		err = ENOMEM;
		goto rx_fail;
	}

	ctx->ifc_txqs = txq;
	ctx->ifc_rxqs = rxq;

	/*
	 * XXX handle allocation failure
	 */
	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
		/* Set up some basics */

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
			device_printf(dev, "failed to allocate iflib_dma_info\n");
			err = ENOMEM;
			goto err_tx_desc;
		}
		txq->ift_ifdi = ifdip;
		for (j = 0; j < ntxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
				device_printf(dev, "Unable to allocate Descriptor memory\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
		}
		txq->ift_ctx = ctx;
		txq->ift_id = i;
		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
			txq->ift_br_offset = 1;
		} else {
			txq->ift_br_offset = 0;
		}
		/* XXX fix this */
		txq->ift_timer.c_cpu = cpu;
		txq->ift_db_check.c_cpu = cpu;
		txq->ift_nbr = nbuf_rings;

		if (iflib_txsd_alloc(txq)) {
			device_printf(dev, "Critical Failure setting up TX buffers\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		/* Initialize the TX lock */
		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
		    device_get_nameunit(dev), txq->ift_id);
		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
		callout_init_mtx(&txq->ift_db_check, &txq->ift_mtx, 0);

		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
		    device_get_nameunit(dev), txq->ift_id);
		TXDB_LOCK_INIT(txq);

		txq->ift_br = brscp + i*nbuf_rings;
		for (j = 0; j < nbuf_rings; j++) {
			err = ifmp_ring_alloc(&txq->ift_br[j], 2048, txq, iflib_txq_drain,
			    iflib_txq_can_drain, M_IFLIB, M_WAITOK);
			if (err) {
				/* XXX free any allocated rings */
				device_printf(dev, "Unable to allocate buf_ring\n");
				goto err_tx_desc;
			}
		}
	}

	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
		/* Set up some basics */

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
			device_printf(dev, "failed to allocate iflib_dma_info\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		rxq->ifr_ifdi = ifdip;
		for (j = 0; j < nrxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
				device_printf(dev, "Unable to allocate Descriptor memory\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
		}
		rxq->ifr_ctx = ctx;
		rxq->ifr_id = i;
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			rxq->ifr_fl_offset = 1;
		} else {
			rxq->ifr_fl_offset = 0;
		}
		rxq->ifr_nfl = nfree_lists;
		if (!(fl =
		    (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate free list
memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_fl = fl; for (j = 0; j < nfree_lists; j++) { rxq->ifr_fl[j].ifl_rxq = rxq; rxq->ifr_fl[j].ifl_id = j; rxq->ifr_fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; } /* Allocate receive buffers for the ring*/ if (iflib_rxsd_alloc(rxq)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); err = ENOMEM; goto err_rx_desc; } } /* TXQs */ vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); for (i = 0; i < ntxqsets; i++) { iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; for (j = 0; j < ntxqs; j++, di++) { vaddrs[i*ntxqs + j] = di->idi_vaddr; paddrs[i*ntxqs + j] = di->idi_paddr; } } if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); /* RXQs */ vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); for (i = 0; i < nrxqsets; i++) { iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; for (j = 0; j < nrxqs; j++, di++) { vaddrs[i*nrxqs + j] = di->idi_vaddr; paddrs[i*nrxqs + j] = di->idi_paddr; } } if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); return (0); /* XXX handle allocation failure changes */ err_rx_desc: err_tx_desc: if (ctx->ifc_rxqs != NULL) free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; if (ctx->ifc_txqs != NULL) free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; rx_fail: if (brscp != NULL) free(brscp, M_IFLIB); if (rxq != NULL) free(rxq, M_IFLIB); if (txq != NULL) free(txq, M_IFLIB); fail: return (err); } static int iflib_tx_structures_setup(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i; for (i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_setup(txq); return (0); } static void iflib_tx_structures_free(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i, j; for (i = 0; i < NTXQSETS(ctx); i++, txq++) { iflib_txq_destroy(txq); for (j = 0; j < ctx->ifc_nhwtxqs; j++) iflib_dma_free(&txq->ift_ifdi[j]); } free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; IFDI_QUEUES_FREE(ctx); } /********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int iflib_rx_structures_setup(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; int q; #if defined(INET6) || defined(INET) int i, err; #endif for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { #if defined(INET6) || defined(INET) tcp_lro_free(&rxq->ifr_lc); if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, TCP_LRO_ENTRIES, min(1024, ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) { device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); goto fail; } rxq->ifr_lro_enabled = TRUE; #endif IFDI_RXQ_SETUP(ctx, rxq->ifr_id); } return (0); #if defined(INET6) || defined(INET) fail: /* * Free RX software descriptors allocated so far, we will only handle * the rings that completed, the failing case will have * cleaned up for itself. 'q' failed, so its the terminus. 
*/ rxq = ctx->ifc_rxqs; for (i = 0; i < q; ++i, rxq++) { iflib_rx_sds_free(rxq); rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } return (err); #endif } /********************************************************************* * * Free all receive rings. * **********************************************************************/ static void iflib_rx_structures_free(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { iflib_rx_sds_free(rxq); } } static int iflib_qset_structures_setup(if_ctx_t ctx) { int err; if ((err = iflib_tx_structures_setup(ctx)) != 0) return (err); if ((err = iflib_rx_structures_setup(ctx)) != 0) { device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); } return (err); } int iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name) { return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); } static int find_nth(if_ctx_t ctx, cpuset_t *cpus, int qid) { int i, cpuid, eqid, count; CPU_COPY(&ctx->ifc_cpus, cpus); count = CPU_COUNT(&ctx->ifc_cpus); eqid = qid % count; /* clear up to the qid'th bit */ for (i = 0; i < eqid; i++) { cpuid = CPU_FFS(cpus); MPASS(cpuid != 0); CPU_CLR(cpuid-1, cpus); } cpuid = CPU_FFS(cpus); MPASS(cpuid != 0); return (cpuid-1); } int iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, iflib_intr_type_t type, driver_filter_t *filter, void *filter_arg, int qid, char *name) { struct grouptask *gtask; struct taskqgroup *tqg; iflib_filter_info_t info; cpuset_t cpus; gtask_fn_t *fn; int tqrid, err, cpuid; void *q; info = &ctx->ifc_filter_info; tqrid = rid; switch (type) { /* XXX merge tx/rx for netmap? 
	 */
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		info = &ctx->ifc_txqs[qid].ift_filter_info;
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_ADMIN:
		q = ctx;
		tqrid = -1;
		info = &ctx->ifc_filter_info;
		gtask = &ctx->ifc_admin_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_admin;
		break;
	default:
		panic("unknown net intr type");
	}
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = ctx;

	err = _iflib_irq_alloc(ctx, irq, rid, iflib_fast_intr, NULL, info, name);
	if (err != 0) {
		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
		return (err);
	}
	if (type == IFLIB_INTR_ADMIN)
		return (0);

	if (tqrid != -1) {
		cpuid = find_nth(ctx, &cpus, qid);
		taskqgroup_attach_cpu(tqg, gtask, q, cpuid, irq->ii_rid, name);
	} else {
		taskqgroup_attach(tqg, gtask, q, tqrid, name);
	}

	return (0);
}

void
iflib_softirq_alloc_generic(if_ctx_t ctx, int rid, iflib_intr_type_t type,
    void *arg, int qid, char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	void *q;

	switch (type) {
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		break;
	case IFLIB_INTR_IOV:
		q = ctx;
		gtask = &ctx->ifc_vflr_task;
		tqg = qgroup_if_config_tqg;
		rid = -1;
		fn = _task_fn_iov;
		break;
	default:
		panic("unknown net intr type");
	}
	GROUPTASK_INIT(gtask, 0, fn, q);
	taskqgroup_attach(tqg, gtask, q, rid, name);
}

void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{
	if (irq->ii_tag)
		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);

	if (irq->ii_res)
		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res);
}

static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_irq_t irq = &ctx->ifc_legacy_irq;
	iflib_filter_info_t info;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	int tqrid;
	void *q;
	int err;
-
-	/*
-	 * group taskqueues aren't properly set up until SMP is started
-	 * so we disable interrupts until we can handle them post
-	 * SI_SUB_SMP
-	 */
-	IFDI_INTR_DISABLE(ctx);
	q = &ctx->ifc_rxqs[0];
	info = &rxq[0].ifr_filter_info;
	gtask = &rxq[0].ifr_task;
	tqg = qgroup_if_io_tqg;
	tqrid = irq->ii_rid = *rid;
	fn = _task_fn_rx;

	ctx->ifc_flags |= IFC_LEGACY;
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = ctx;

	/* We allocate a single interrupt resource */
	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr, NULL, info, name)) != 0)
		return (err);
	GROUPTASK_INIT(gtask, 0, fn, q);
	taskqgroup_attach(tqg, gtask, q, tqrid, name);

	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, tqrid, "tx");
	return (0);
}

void
iflib_led_create(if_ctx_t ctx)
{
	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
	    device_get_nameunit(ctx->ifc_dev));
}

void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{
	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}

void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{
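	/*
	 * Defer RX processing to the queue's grouptask: drivers call
	 * this from an interrupt filter so the heavy lifting happens
	 * in task context instead.
	 */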
GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); } void iflib_admin_intr_deferred(if_ctx_t ctx) { #ifdef INVARIANTS struct grouptask *gtask; gtask = &ctx->ifc_admin_task; MPASS(gtask->gt_taskqueue != NULL); #endif GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); } void iflib_iov_intr_deferred(if_ctx_t ctx) { GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); } void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) { taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); } void iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn, char *name) { GROUPTASK_INIT(gtask, 0, fn, ctx); taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); } void iflib_config_gtask_deinit(struct grouptask *gtask) { taskqgroup_detach(qgroup_if_config_tqg, gtask); } void iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq = ctx->ifc_txqs; if_setbaudrate(ifp, baudrate); /* If link down, disable watchdog */ if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) txq->ift_qstatus = IFLIB_QUEUE_IDLE; } ctx->ifc_link_state = link_state; if_link_state_change(ifp, link_state); } static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) { int credits; #ifdef INVARIANTS int credits_pre = txq->ift_cidx_processed; #endif if (ctx->isc_txd_credits_update == NULL) return (0); if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, txq->ift_cidx_processed, true)) == 0) return (0); txq->ift_processed += credits; txq->ift_cidx_processed += credits; MPASS(credits_pre + credits == txq->ift_cidx_processed); if (txq->ift_cidx_processed >= txq->ift_size) txq->ift_cidx_processed -= txq->ift_size; return (credits); } static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, int cidx, int budget) { return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, budget)); } void iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, const char *description, if_int_delay_info_t info, int offset, int value) { info->iidi_ctx = ctx; info->iidi_offset = offset; info->iidi_value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, iflib_sysctl_int_delay, "I", description); } struct mtx * iflib_ctx_lock_get(if_ctx_t ctx) { return (&ctx->ifc_mtx); } static int iflib_msix_init(if_ctx_t ctx) { device_t dev = ctx->ifc_dev; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs; int iflib_num_tx_queues, iflib_num_rx_queues; int err, admincnt, bar; iflib_num_tx_queues = scctx->isc_ntxqsets; iflib_num_rx_queues = scctx->isc_nrxqsets; device_printf(dev, "msix_init qsets capped at %d\n", iflib_num_tx_queues); bar = ctx->ifc_softc_ctx.isc_msix_bar; admincnt = sctx->isc_admin_intrcnt; /* Override by tuneable */ if (enable_msix == 0) goto msi; /* ** When used in a virtualized environment ** PCI BUSMASTER capability may not be set ** so explicity set it here and rewrite ** the ENABLE in the MSIX control register ** at this point to cause the host to ** successfully initialize us. 
*/ { uint16_t pci_cmd_word; int msix_ctrl, rid; rid = 0; pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); pci_cmd_word |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); pci_find_cap(dev, PCIY_MSIX, &rid); rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } /* * bar == -1 => "trust me I know what I'm doing" * https://www.youtube.com/watch?v=nnwWKkNau4I * Some drivers are for hardware that is so shoddily * documented that no one knows which bars are which * so the developer has to map all bars. This hack * allows shoddy garbage to use msix in this framework. */ if (bar != -1) { ctx->ifc_msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE); if (ctx->ifc_msix_mem == NULL) { /* May not be enabled */ device_printf(dev, "Unable to map MSIX table \n"); goto msi; } } /* First try MSI/X */ if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */ device_printf(dev, "System has MSIX disabled \n"); bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; goto msi; } #if IFLIB_DEBUG /* use only 1 qset in debug mode */ queuemsgs = min(msgs - admincnt, 1); #else queuemsgs = msgs - admincnt; #endif if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) == 0) { #ifdef RSS queues = imin(queuemsgs, rss_getnumbuckets()); #else queues = queuemsgs; #endif queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n", CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); } else { device_printf(dev, "Unable to fetch CPU list\n"); /* Figure out a reasonable auto config value */ queues = min(queuemsgs, mp_ncpus); } #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) rx_queues = iflib_num_rx_queues; else rx_queues = queues; /* * We want this to be all logical CPUs by default */ if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) tx_queues = iflib_num_tx_queues; else tx_queues = mp_ncpus; if (ctx->ifc_sysctl_qs_eq_override == 0) { #ifdef INVARIANTS if (tx_queues != rx_queues) device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", min(rx_queues, tx_queues), min(rx_queues, tx_queues)); #endif tx_queues = min(rx_queues, tx_queues); rx_queues = min(rx_queues, tx_queues); } device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); vectors = rx_queues + admincnt; if ((err = pci_alloc_msix(dev, &vectors)) == 0) { device_printf(dev, "Using MSIX interrupts with %d vectors\n", vectors); scctx->isc_vectors = vectors; scctx->isc_nrxqsets = rx_queues; scctx->isc_ntxqsets = tx_queues; scctx->isc_intr = IFLIB_INTR_MSIX; return (vectors); } else { device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); } msi: vectors = pci_msi_count(dev); scctx->isc_nrxqsets = 1; scctx->isc_ntxqsets = 1; scctx->isc_vectors = vectors; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { device_printf(dev,"Using an MSI interrupt\n"); scctx->isc_intr = IFLIB_INTR_MSI; } else { device_printf(dev,"Using a Legacy interrupt\n"); scctx->isc_intr = IFLIB_INTR_LEGACY; } return (vectors); } char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; static int mp_ring_state_handler(SYSCTL_HANDLER_ARGS) { int rc; 
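	/*
	 * The mp_ring exports its state as a single 64-bit word; treat
	 * it as four 16-bit fields (pidx_head, pidx_tail, cidx, ring
	 * state) and render them in human-readable form.
	 */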
uint16_t *state = ((uint16_t *)oidp->oid_arg1); struct sbuf *sb; char *ring_state = "UNKNOWN"; /* XXX needed ? */ rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); MPASS(sb != NULL); if (sb == NULL) return (ENOMEM); if (state[3] <= 3) ring_state = ring_states[state[3]]; sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", state[0], state[1], state[2], ring_state); rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } enum iflib_ndesc_handler { IFLIB_NTXD_HANDLER, IFLIB_NRXD_HANDLER, }; static int mp_ndesc_handler(SYSCTL_HANDLER_ARGS) { if_ctx_t ctx = (void *)arg1; enum iflib_ndesc_handler type = arg2; char buf[256] = {0}; uint16_t *ndesc; char *p, *next; int nqs, rc, i; MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER); nqs = 8; switch(type) { case IFLIB_NTXD_HANDLER: ndesc = ctx->ifc_sysctl_ntxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_ntxqs; break; case IFLIB_NRXD_HANDLER: ndesc = ctx->ifc_sysctl_nrxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_nrxqs; break; } if (nqs == 0) nqs = 8; for (i=0; i<8; i++) { if (i >= nqs) break; if (i) strcat(buf, ","); sprintf(strchr(buf, 0), "%d", ndesc[i]); } rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; i++, p = strsep(&next, " ,")) { ndesc[i] = strtoul(p, NULL, 10); } return(rc); } #define NAME_BUFLEN 32 static void iflib_add_device_sysctl_pre(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child, *oid_list; struct sysctl_ctx_list *ctx_list; struct sysctl_oid *node; ctx_list = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields"); oid_list = SYSCTL_CHILDREN(node); SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0, "driver version"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, "# of txqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, "# of rxqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, "permit #txq != #rxq"); /* XXX change for per-queue sizes */ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A", "list of # of tx descriptors to use, 0 = use default #"); SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A", "list of # of rx descriptors to use, 0 = use default #"); } static void iflib_add_device_sysctl_post(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx_list; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; char namebuf[NAME_BUFLEN]; char *qfmt; struct sysctl_oid *queue_node, *fl_node, *node; struct sysctl_oid_list *queue_list, *fl_list; ctx_list = device_get_sysctl_ctx(dev); node = ctx->ifc_sysctl_node; child = SYSCTL_CHILDREN(node); if (scctx->isc_ntxqsets > 100) qfmt = "txq%03d"; else if (scctx->isc_ntxqsets > 10) qfmt 
= "txq%02d"; else qfmt = "txq%d"; for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued", CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued", CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued"); #endif SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag", CTLFLAG_RD, &txq->ift_mbuf_defrag, "# of times m_defrag was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", CTLFLAG_RD, &txq->ift_pullups, "# of times m_pullup was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail, "# of times no descriptors were available"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed, "# of times dma map failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx", CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx", CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use", CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed", CTLFLAG_RD, &txq->ift_processed, "descriptors procesed for clean"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned", CTLFLAG_RD, &txq->ift_cleaned, "total cleaned"); SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state", CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br[0]->state), 0, mp_ring_state_handler, "A", "soft ring state"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->ift_br[0]->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops", CTLFLAG_RD, &txq->ift_br[0]->drops, "# of drops in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->ift_br[0]->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->ift_br[0]->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts", CTLFLAG_RD, &txq->ift_br[0]->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->ift_br[0]->abdications, "# of consumer abdications in the mp_ring for this queue"); } if (scctx->isc_nrxqsets > 100) qfmt = "rxq%03d"; else if (scctx->isc_nrxqsets > 10) qfmt = "rxq%02d"; else qfmt = "rxq%d"; for (i = 
0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); if (sctx->isc_flags & IFLIB_HAS_RXCQ) { SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx", CTLFLAG_RD, &rxq->ifr_cq_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1, "Consumer Index"); } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j); fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "freelist Name"); fl_list = SYSCTL_CHILDREN(fl_node); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx", CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx", CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits", CTLFLAG_RD, &fl->ifl_credits, 1, "credits available"); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued, "mbufs allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued, "mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued, "clusters allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued, "clusters freed"); #endif } } } Index: projects/clang400-import/sys/net80211/ieee80211.c =================================================================== --- projects/clang400-import/sys/net80211/ieee80211.c (revision 312719) +++ projects/clang400-import/sys/net80211/ieee80211.c (revision 312720) @@ -1,2493 +1,2493 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * IEEE 802.11 generic handler
 */
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>

#include <machine/stdarg.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ethernet.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_vht.h>

#include <net/bpf.h>

const char *ieee80211_phymode_name[IEEE80211_MODE_MAX] = {
	[IEEE80211_MODE_AUTO]	  = "auto",
	[IEEE80211_MODE_11A]	  = "11a",
	[IEEE80211_MODE_11B]	  = "11b",
	[IEEE80211_MODE_11G]	  = "11g",
	[IEEE80211_MODE_FH]	  = "FH",
	[IEEE80211_MODE_TURBO_A]  = "turboA",
	[IEEE80211_MODE_TURBO_G]  = "turboG",
	[IEEE80211_MODE_STURBO_A] = "sturboA",
	[IEEE80211_MODE_HALF]	  = "half",
	[IEEE80211_MODE_QUARTER]  = "quarter",
	[IEEE80211_MODE_11NA]	  = "11na",
	[IEEE80211_MODE_11NG]	  = "11ng",
	[IEEE80211_MODE_VHT_2GHZ] = "11acg",
	[IEEE80211_MODE_VHT_5GHZ] = "11ac",
};
/* map ieee80211_opmode to the corresponding capability bit */
const int ieee80211_opcap[IEEE80211_OPMODE_MAX] = {
	[IEEE80211_M_IBSS]	= IEEE80211_C_IBSS,
	[IEEE80211_M_WDS]	= IEEE80211_C_WDS,
	[IEEE80211_M_STA]	= IEEE80211_C_STA,
	[IEEE80211_M_AHDEMO]	= IEEE80211_C_AHDEMO,
	[IEEE80211_M_HOSTAP]	= IEEE80211_C_HOSTAP,
	[IEEE80211_M_MONITOR]	= IEEE80211_C_MONITOR,
#ifdef IEEE80211_SUPPORT_MESH
	[IEEE80211_M_MBSS]	= IEEE80211_C_MBSS,
#endif
};

const uint8_t ieee80211broadcastaddr[IEEE80211_ADDR_LEN] =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static	void ieee80211_syncflag_locked(struct ieee80211com *ic, int flag);
static	void ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag);
static	void ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag);
static	void ieee80211_syncflag_vht_locked(struct ieee80211com *ic, int flag);
static	int ieee80211_media_setup(struct ieee80211com *ic,
	    struct ifmedia *media, int caps, int addsta,
	    ifm_change_cb_t media_change, ifm_stat_cb_t media_stat);
static	int media_status(enum ieee80211_opmode,
	    const struct ieee80211_channel *);
static uint64_t ieee80211_get_counter(struct ifnet *, ift_counter);

MALLOC_DEFINE(M_80211_VAP, "80211vap", "802.11 vap state");

/*
 * Default supported rates for 802.11 operation (in IEEE .5Mb units).
 */
#define	B(r)	((r) | IEEE80211_RATE_BASIC)
static const struct ieee80211_rateset ieee80211_rateset_11a =
	{ 8, { B(12), 18, B(24), 36, B(48), 72, 96, 108 } };
static const struct ieee80211_rateset ieee80211_rateset_half =
	{ 8, { B(6), 9, B(12), 18, B(24), 36, 48, 54 } };
static const struct ieee80211_rateset ieee80211_rateset_quarter =
	{ 8, { B(3), 4, B(6), 9, B(12), 18, 24, 27 } };
static const struct ieee80211_rateset ieee80211_rateset_11b =
	{ 4, { B(2), B(4), B(11), B(22) } };
/* NB: OFDM rates are handled specially based on mode */
static const struct ieee80211_rateset ieee80211_rateset_11g =
	{ 12, { B(2), B(4), B(11), B(22), 12, 18, 24, 36, 48, 72, 96, 108 } };
#undef B

static	int set_vht_extchan(struct ieee80211_channel *c);

/*
 * Fill in 802.11 available channel set, mark
 * all available channels as active, and pick
 * a default channel if not already specified.
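 *
 * The driver must have populated ic_channels/ic_nchans before this
 * runs; the KASSERT below enforces that.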
*/ void ieee80211_chan_init(struct ieee80211com *ic) { #define DEFAULTRATES(m, def) do { \ if (ic->ic_sup_rates[m].rs_nrates == 0) \ ic->ic_sup_rates[m] = def; \ } while (0) struct ieee80211_channel *c; int i; KASSERT(0 < ic->ic_nchans && ic->ic_nchans <= IEEE80211_CHAN_MAX, ("invalid number of channels specified: %u", ic->ic_nchans)); memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail)); memset(ic->ic_modecaps, 0, sizeof(ic->ic_modecaps)); setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO); for (i = 0; i < ic->ic_nchans; i++) { c = &ic->ic_channels[i]; KASSERT(c->ic_flags != 0, ("channel with no flags")); /* * Help drivers that work only with frequencies by filling * in IEEE channel #'s if not already calculated. Note this * mimics similar work done in ieee80211_setregdomain when * changing regulatory state. */ if (c->ic_ieee == 0) c->ic_ieee = ieee80211_mhz2ieee(c->ic_freq,c->ic_flags); /* * Setup the HT40/VHT40 upper/lower bits. * The VHT80 math is done elsewhere. */ if (IEEE80211_IS_CHAN_HT40(c) && c->ic_extieee == 0) c->ic_extieee = ieee80211_mhz2ieee(c->ic_freq + (IEEE80211_IS_CHAN_HT40U(c) ? 20 : -20), c->ic_flags); /* Update VHT math */ /* * XXX VHT again, note that this assumes VHT80 channels * are legit already */ set_vht_extchan(c); /* default max tx power to max regulatory */ if (c->ic_maxpower == 0) c->ic_maxpower = 2*c->ic_maxregpower; setbit(ic->ic_chan_avail, c->ic_ieee); /* * Identify mode capabilities. */ if (IEEE80211_IS_CHAN_A(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11A); if (IEEE80211_IS_CHAN_B(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11B); if (IEEE80211_IS_CHAN_ANYG(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11G); if (IEEE80211_IS_CHAN_FHSS(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_FH); if (IEEE80211_IS_CHAN_108A(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_A); if (IEEE80211_IS_CHAN_108G(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_G); if (IEEE80211_IS_CHAN_ST(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_STURBO_A); if (IEEE80211_IS_CHAN_HALF(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_HALF); if (IEEE80211_IS_CHAN_QUARTER(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_QUARTER); if (IEEE80211_IS_CHAN_HTA(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11NA); if (IEEE80211_IS_CHAN_HTG(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_11NG); if (IEEE80211_IS_CHAN_VHTA(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_VHT_5GHZ); if (IEEE80211_IS_CHAN_VHTG(c)) setbit(ic->ic_modecaps, IEEE80211_MODE_VHT_2GHZ); } /* initialize candidate channels to all available */ memcpy(ic->ic_chan_active, ic->ic_chan_avail, sizeof(ic->ic_chan_avail)); /* sort channel table to allow lookup optimizations */ ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); /* invalidate any previous state */ ic->ic_bsschan = IEEE80211_CHAN_ANYC; ic->ic_prevchan = NULL; ic->ic_csa_newchan = NULL; /* arbitrarily pick the first channel */ ic->ic_curchan = &ic->ic_channels[0]; ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan); /* fillin well-known rate sets if driver has not specified */ DEFAULTRATES(IEEE80211_MODE_11B, ieee80211_rateset_11b); DEFAULTRATES(IEEE80211_MODE_11G, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_11A, ieee80211_rateset_11a); DEFAULTRATES(IEEE80211_MODE_TURBO_A, ieee80211_rateset_11a); DEFAULTRATES(IEEE80211_MODE_TURBO_G, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_STURBO_A, ieee80211_rateset_11a); DEFAULTRATES(IEEE80211_MODE_HALF, ieee80211_rateset_half); DEFAULTRATES(IEEE80211_MODE_QUARTER, ieee80211_rateset_quarter); DEFAULTRATES(IEEE80211_MODE_11NA, ieee80211_rateset_11a); 
DEFAULTRATES(IEEE80211_MODE_11NG, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_VHT_2GHZ, ieee80211_rateset_11g); DEFAULTRATES(IEEE80211_MODE_VHT_5GHZ, ieee80211_rateset_11a); /* * Setup required information to fill the mcsset field, if the driver did * not. Assume a 2T2R setup for historic reasons. */ if (ic->ic_rxstream == 0) ic->ic_rxstream = 2; if (ic->ic_txstream == 0) ic->ic_txstream = 2; /* * Set auto mode to reset active channel state and any desired channel. */ (void) ieee80211_setmode(ic, IEEE80211_MODE_AUTO); #undef DEFAULTRATES } static void null_update_mcast(struct ieee80211com *ic) { ic_printf(ic, "need multicast update callback\n"); } static void null_update_promisc(struct ieee80211com *ic) { ic_printf(ic, "need promiscuous mode update callback\n"); } static void null_update_chw(struct ieee80211com *ic) { ic_printf(ic, "%s: need callback\n", __func__); } int ic_printf(struct ieee80211com *ic, const char * fmt, ...) { va_list ap; int retval; retval = printf("%s: ", ic->ic_name); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } static LIST_HEAD(, ieee80211com) ic_head = LIST_HEAD_INITIALIZER(ic_head); static struct mtx ic_list_mtx; MTX_SYSINIT(ic_list, &ic_list_mtx, "ieee80211com list", MTX_DEF); static int sysctl_ieee80211coms(SYSCTL_HANDLER_ARGS) { struct ieee80211com *ic; struct sbuf sb; char *sp; int error; error = sysctl_wire_old_buffer(req, 0); if (error) return (error); sbuf_new_for_sysctl(&sb, NULL, 8, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); sp = ""; mtx_lock(&ic_list_mtx); LIST_FOREACH(ic, &ic_head, ic_next) { sbuf_printf(&sb, "%s%s", sp, ic->ic_name); sp = " "; } mtx_unlock(&ic_list_mtx); error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } SYSCTL_PROC(_net_wlan, OID_AUTO, devices, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_ieee80211coms, "A", "names of available 802.11 devices"); /* * Attach/setup the common net80211 state. Called by * the driver on attach, prior to creating any vap's. */ void ieee80211_ifattach(struct ieee80211com *ic) { IEEE80211_LOCK_INIT(ic, ic->ic_name); IEEE80211_TX_LOCK_INIT(ic, ic->ic_name); TAILQ_INIT(&ic->ic_vaps); /* Create a taskqueue for all state changes */ ic->ic_tq = taskqueue_create("ic_taskq", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &ic->ic_tq); taskqueue_start_threads(&ic->ic_tq, 1, PI_NET, "%s net80211 taskq", ic->ic_name); ic->ic_ierrors = counter_u64_alloc(M_WAITOK); ic->ic_oerrors = counter_u64_alloc(M_WAITOK); /* * Fill in 802.11 available channel set, mark all * available channels as active, and pick a default * channel if not already specified. */ ieee80211_chan_init(ic); ic->ic_update_mcast = null_update_mcast; ic->ic_update_promisc = null_update_promisc; ic->ic_update_chw = null_update_chw; ic->ic_hash_key = arc4random(); ic->ic_bintval = IEEE80211_BINTVAL_DEFAULT; ic->ic_lintval = ic->ic_bintval; ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX; ieee80211_crypto_attach(ic); ieee80211_node_attach(ic); ieee80211_power_attach(ic); ieee80211_proto_attach(ic); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_attach(ic); #endif ieee80211_ht_attach(ic); ieee80211_vht_attach(ic); ieee80211_scan_attach(ic); ieee80211_regdomain_attach(ic); ieee80211_dfs_attach(ic); ieee80211_sysctl_attach(ic); mtx_lock(&ic_list_mtx); LIST_INSERT_HEAD(&ic_head, ic, ic_next); mtx_unlock(&ic_list_mtx); } /* * Detach net80211 state on device detach. Tear down * all vap's and reclaim all common state prior to the * device state going away.
Note we may call back into the * driver; it must be prepared for this. */ void ieee80211_ifdetach(struct ieee80211com *ic) { struct ieee80211vap *vap; mtx_lock(&ic_list_mtx); LIST_REMOVE(ic, ic_next); mtx_unlock(&ic_list_mtx); taskqueue_drain(taskqueue_thread, &ic->ic_restart_task); /* * The VAP is responsible for setting and clearing * the VIMAGE context. */ while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL) ieee80211_vap_destroy(vap); ieee80211_waitfor_parent(ic); ieee80211_sysctl_detach(ic); ieee80211_dfs_detach(ic); ieee80211_regdomain_detach(ic); ieee80211_scan_detach(ic); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_detach(ic); #endif ieee80211_vht_detach(ic); ieee80211_ht_detach(ic); /* NB: must be called before ieee80211_node_detach */ ieee80211_proto_detach(ic); ieee80211_crypto_detach(ic); ieee80211_power_detach(ic); ieee80211_node_detach(ic); counter_u64_free(ic->ic_ierrors); counter_u64_free(ic->ic_oerrors); taskqueue_free(ic->ic_tq); IEEE80211_TX_LOCK_DESTROY(ic); IEEE80211_LOCK_DESTROY(ic); } struct ieee80211com * ieee80211_find_com(const char *name) { struct ieee80211com *ic; mtx_lock(&ic_list_mtx); LIST_FOREACH(ic, &ic_head, ic_next) if (strcmp(ic->ic_name, name) == 0) break; mtx_unlock(&ic_list_mtx); return (ic); } void ieee80211_iterate_coms(ieee80211_com_iter_func *f, void *arg) { struct ieee80211com *ic; mtx_lock(&ic_list_mtx); LIST_FOREACH(ic, &ic_head, ic_next) (*f)(arg, ic); mtx_unlock(&ic_list_mtx); } /* * Default reset method for use with the ioctl support. This * method is invoked after any state change in the 802.11 * layer that should be propagated to the hardware but not * require re-initialization of the 802.11 state machine (e.g. * rescanning for an ap). We always return ENETRESET, which * should cause the driver to re-initialize the device. Drivers * can override this method to implement more optimized support. */ static int default_reset(struct ieee80211vap *vap, u_long cmd) { return ENETRESET; } /* * Default for updating the VAP default TX key index. * * Drivers that support TX offload as well as hardware encryption offload * may need to be informed of key index changes separate from the key * update. */ static void default_update_deftxkey(struct ieee80211vap *vap, ieee80211_keyix kid) { /* XXX assert validity */ /* XXX assert we're in a key update block */ vap->iv_def_txkey = kid; } /* * Add underlying device errors to vap errors. */ static uint64_t ieee80211_get_counter(struct ifnet *ifp, ift_counter cnt) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; uint64_t rv; rv = if_get_counter_default(ifp, cnt); switch (cnt) { case IFCOUNTER_OERRORS: rv += counter_u64_fetch(ic->ic_oerrors); break; case IFCOUNTER_IERRORS: rv += counter_u64_fetch(ic->ic_ierrors); break; default: break; } return (rv); } /* * Prepare a vap for use. Drivers use this call to * set up net80211 state in new vap's prior to attaching * them with ieee80211_vap_attach (below).
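 */

/*
 * Schematic sketch, not from the original source, of how a driver
 * typically pairs the two calls; the "mydrv" names are hypothetical
 * and real drivers hang their own state off the embedded vap.
 */
#if 0
struct mydrv_vap {
	struct ieee80211vap vap;	/* must come first */
	/* driver-private per-vap state goes here */
};

static struct ieee80211vap *
mydrv_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
    int unit, enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct mydrv_vap *mvp;
	struct ieee80211vap *vap;

	mvp = malloc(sizeof(*mvp), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &mvp->vap;
	/* prepare net80211 state */
	if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid) != 0) {
		free(mvp, M_80211_VAP);
		return (NULL);
	}
	/* override defaults (iv_newstate, iv_reset, ...) here */
	/* activate; the vap is live once this returns */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status, mac);
	return (vap);
}
#endif
/*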
*/ int ieee80211_vap_setup(struct ieee80211com *ic, struct ieee80211vap *vap, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN]) { struct ifnet *ifp; ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { ic_printf(ic, "%s: unable to allocate ifnet\n", __func__); return ENOMEM; } if_initname(ifp, name, unit); ifp->if_softc = vap; /* back pointer */ ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_transmit = ieee80211_vap_transmit; ifp->if_qflush = ieee80211_vap_qflush; ifp->if_ioctl = ieee80211_ioctl; ifp->if_init = ieee80211_init; ifp->if_get_counter = ieee80211_get_counter; vap->iv_ifp = ifp; vap->iv_ic = ic; vap->iv_flags = ic->ic_flags; /* propagate common flags */ vap->iv_flags_ext = ic->ic_flags_ext; vap->iv_flags_ven = ic->ic_flags_ven; vap->iv_caps = ic->ic_caps &~ IEEE80211_C_OPMODE; /* 11n capabilities - XXX methodize */ vap->iv_htcaps = ic->ic_htcaps; vap->iv_htextcaps = ic->ic_htextcaps; /* 11ac capabilities - XXX methodize */ vap->iv_vhtcaps = ic->ic_vhtcaps; vap->iv_vhtextcaps = ic->ic_vhtextcaps; vap->iv_opmode = opmode; vap->iv_caps |= ieee80211_opcap[opmode]; IEEE80211_ADDR_COPY(vap->iv_myaddr, ic->ic_macaddr); switch (opmode) { case IEEE80211_M_WDS: /* * WDS links must specify the bssid of the far end. * For legacy operation this is a static relationship. * For non-legacy operation the station must associate * and be authorized to pass traffic. Plumbing the * vap to the proper node happens when the vap * transitions to RUN state. */ IEEE80211_ADDR_COPY(vap->iv_des_bssid, bssid); vap->iv_flags |= IEEE80211_F_DESBSSID; if (flags & IEEE80211_CLONE_WDSLEGACY) vap->iv_flags_ext |= IEEE80211_FEXT_WDSLEGACY; break; #ifdef IEEE80211_SUPPORT_TDMA case IEEE80211_M_AHDEMO: if (flags & IEEE80211_CLONE_TDMA) { /* NB: checked before clone operation allowed */ KASSERT(ic->ic_caps & IEEE80211_C_TDMA, ("not TDMA capable, ic_caps 0x%x", ic->ic_caps)); /* * Propagate TDMA capability to mark vap; this * cannot be removed and is used to distinguish * regular ahdemo operation from ahdemo+tdma. */ vap->iv_caps |= IEEE80211_C_TDMA; } break; #endif default: break; } /* auto-enable s/w beacon miss support */ if (flags & IEEE80211_CLONE_NOBEACONS) vap->iv_flags_ext |= IEEE80211_FEXT_SWBMISS; /* auto-generated or user supplied MAC address */ if (flags & (IEEE80211_CLONE_BSSID|IEEE80211_CLONE_MACADDR)) vap->iv_flags_ext |= IEEE80211_FEXT_UNIQMAC; /* * Enable various functionality by default if we're * capable; the driver can override us if it knows better. */ if (vap->iv_caps & IEEE80211_C_WME) vap->iv_flags |= IEEE80211_F_WME; if (vap->iv_caps & IEEE80211_C_BURST) vap->iv_flags |= IEEE80211_F_BURST; /* NB: bg scanning only makes sense for station mode right now */ if (vap->iv_opmode == IEEE80211_M_STA && (vap->iv_caps & IEEE80211_C_BGSCAN)) vap->iv_flags |= IEEE80211_F_BGSCAN; vap->iv_flags |= IEEE80211_F_DOTH; /* XXX no cap, just ena */ /* NB: DFS support only makes sense for ap mode right now */ if (vap->iv_opmode == IEEE80211_M_HOSTAP && (vap->iv_caps & IEEE80211_C_DFS)) vap->iv_flags_ext |= IEEE80211_FEXT_DFS; vap->iv_des_chan = IEEE80211_CHAN_ANYC; /* any channel is ok */ vap->iv_bmissthreshold = IEEE80211_HWBMISS_DEFAULT; vap->iv_dtim_period = IEEE80211_DTIM_DEFAULT; /* * Install a default reset method for the ioctl support; * the driver can override this. */ vap->iv_reset = default_reset; /* * Install a default crypto key update method, the driver * can override this. 
*/ vap->iv_update_deftxkey = default_update_deftxkey; ieee80211_sysctl_vattach(vap); ieee80211_crypto_vattach(vap); ieee80211_node_vattach(vap); ieee80211_power_vattach(vap); ieee80211_proto_vattach(vap); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_vattach(vap); #endif ieee80211_ht_vattach(vap); ieee80211_vht_vattach(vap); ieee80211_scan_vattach(vap); ieee80211_regdomain_vattach(vap); ieee80211_radiotap_vattach(vap); ieee80211_ratectl_set(vap, IEEE80211_RATECTL_NONE); return 0; } /* * Activate a vap. State should have been prepared with a * call to ieee80211_vap_setup and by the driver. On return * from this call the vap is ready for use. */ int ieee80211_vap_attach(struct ieee80211vap *vap, ifm_change_cb_t media_change, ifm_stat_cb_t media_stat, const uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = vap->iv_ifp; struct ieee80211com *ic = vap->iv_ic; struct ifmediareq imr; int maxrate; IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s parent %s flags 0x%x flags_ext 0x%x\n", __func__, ieee80211_opmode_name[vap->iv_opmode], ic->ic_name, vap->iv_flags, vap->iv_flags_ext); /* * Do late attach work that cannot happen until after * the driver has had a chance to override defaults. */ ieee80211_node_latevattach(vap); ieee80211_power_latevattach(vap); maxrate = ieee80211_media_setup(ic, &vap->iv_media, vap->iv_caps, vap->iv_opmode == IEEE80211_M_STA, media_change, media_stat); ieee80211_media_status(ifp, &imr); /* NB: strip explicit mode; we're actually in autoselect */ ifmedia_set(&vap->iv_media, imr.ifm_active &~ (IFM_MMASK | IFM_IEEE80211_TURBO)); if (maxrate) ifp->if_baudrate = IF_Mbps(maxrate); ether_ifattach(ifp, macaddr); IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp)); /* hook output method setup by ether_ifattach */ vap->iv_output = ifp->if_output; ifp->if_output = ieee80211_output; /* NB: if_mtu set by ether_ifattach to ETHERMTU */ IEEE80211_LOCK(ic); TAILQ_INSERT_TAIL(&ic->ic_vaps, vap, iv_next); ieee80211_syncflag_locked(ic, IEEE80211_F_WME); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP); #endif ieee80211_syncflag_locked(ic, IEEE80211_F_PCF); ieee80211_syncflag_locked(ic, IEEE80211_F_BURST); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_VHT); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80P80); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT160); IEEE80211_UNLOCK(ic); return 1; } /* * Tear down vap state and reclaim the ifnet. * The driver is assumed to have prepared for * this; e.g. by turning off interrupts for the * underlying device. */ void ieee80211_vap_detach(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; CURVNET_SET(ifp->if_vnet); IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s parent %s\n", __func__, ieee80211_opmode_name[vap->iv_opmode], ic->ic_name); /* NB: bpfdetach is called by ether_ifdetach and claims all taps */ ether_ifdetach(ifp); ieee80211_stop(vap); /* * Flush any deferred vap tasks. 
*/ ieee80211_draintask(ic, &vap->iv_nstate_task); ieee80211_draintask(ic, &vap->iv_swbmiss_task); /* XXX band-aid until ifnet handles this for us */ taskqueue_drain(taskqueue_swi, &ifp->if_linktask); IEEE80211_LOCK(ic); KASSERT(vap->iv_state == IEEE80211_S_INIT , ("vap still running")); TAILQ_REMOVE(&ic->ic_vaps, vap, iv_next); ieee80211_syncflag_locked(ic, IEEE80211_F_WME); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP); #endif ieee80211_syncflag_locked(ic, IEEE80211_F_PCF); ieee80211_syncflag_locked(ic, IEEE80211_F_BURST); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT); ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_VHT); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT40); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT80P80); ieee80211_syncflag_vht_locked(ic, IEEE80211_FVHT_USEVHT160); /* NB: this handles the bpfdetach done below */ ieee80211_syncflag_ext_locked(ic, IEEE80211_FEXT_BPF); if (vap->iv_ifflags & IFF_PROMISC) ieee80211_promisc(vap, false); if (vap->iv_ifflags & IFF_ALLMULTI) ieee80211_allmulti(vap, false); IEEE80211_UNLOCK(ic); ifmedia_removeall(&vap->iv_media); ieee80211_radiotap_vdetach(vap); ieee80211_regdomain_vdetach(vap); ieee80211_scan_vdetach(vap); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_superg_vdetach(vap); #endif ieee80211_vht_vdetach(vap); ieee80211_ht_vdetach(vap); /* NB: must be before ieee80211_node_vdetach */ ieee80211_proto_vdetach(vap); ieee80211_crypto_vdetach(vap); ieee80211_power_vdetach(vap); ieee80211_node_vdetach(vap); ieee80211_sysctl_vdetach(vap); if_free(ifp); CURVNET_RESTORE(); } /* * Count number of vaps in promisc, and issue promisc on * parent respectively. */ void ieee80211_promisc(struct ieee80211vap *vap, bool on) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK_ASSERT(ic); if (on) { if (++ic->ic_promisc == 1) ieee80211_runtask(ic, &ic->ic_promisc_task); } else { KASSERT(ic->ic_promisc > 0, ("%s: ic %p not promisc", __func__, ic)); if (--ic->ic_promisc == 0) ieee80211_runtask(ic, &ic->ic_promisc_task); } } /* * Count number of vaps in allmulti, and issue allmulti on * parent respectively. */ void ieee80211_allmulti(struct ieee80211vap *vap, bool on) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK_ASSERT(ic); if (on) { if (++ic->ic_allmulti == 1) ieee80211_runtask(ic, &ic->ic_mcast_task); } else { KASSERT(ic->ic_allmulti > 0, ("%s: ic %p not allmulti", __func__, ic)); if (--ic->ic_allmulti == 0) ieee80211_runtask(ic, &ic->ic_mcast_task); } } /* * Synchronize flag bit state in the com structure * according to the state of all vap's. This is used, * for example, to handle state changes via ioctls. */ static void ieee80211_syncflag_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags & flag) { bit = 1; break; } if (bit) ic->ic_flags |= flag; else ic->ic_flags &= ~flag; } void ieee80211_syncflag(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags &= ~flag; } else vap->iv_flags |= flag; ieee80211_syncflag_locked(ic, flag); IEEE80211_UNLOCK(ic); } /* * Synchronize flags_ht bit state in the com structure * according to the state of all vap's. This is used, * for example, to handle state changes via ioctls. 
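 */

/*
 * Illustrative sketch, not part of the original source: the
 * ieee80211_syncflag*() helpers above take a signed flag, where a
 * negative value means "clear this bit" before the com-wide flag is
 * recomputed.  A standalone model of that convention:
 */
#if 0
#include <stdio.h>

#define EX_F_BURST	0x00010000	/* stand-in for IEEE80211_F_BURST */

static int iv_flags;

static void
syncflag(int flag)
{
	if (flag < 0) {
		flag = -flag;
		iv_flags &= ~flag;
	} else
		iv_flags |= flag;
}

int
main(void)
{
	syncflag(EX_F_BURST);	/* set */
	printf("0x%08x\n", iv_flags);
	syncflag(-EX_F_BURST);	/* clear */
	printf("0x%08x\n", iv_flags);
	return (0);
}
#endif
/*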
*/ static void ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags_ht & flag) { bit = 1; break; } if (bit) ic->ic_flags_ht |= flag; else ic->ic_flags_ht &= ~flag; } void ieee80211_syncflag_ht(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags_ht &= ~flag; } else vap->iv_flags_ht |= flag; ieee80211_syncflag_ht_locked(ic, flag); IEEE80211_UNLOCK(ic); } /* * Synchronize flags_vht bit state in the com structure * according to the state of all vap's. This is used, * for example, to handle state changes via ioctls. */ static void ieee80211_syncflag_vht_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags_vht & flag) { bit = 1; break; } if (bit) ic->ic_flags_vht |= flag; else ic->ic_flags_vht &= ~flag; } void ieee80211_syncflag_vht(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags_vht &= ~flag; } else vap->iv_flags_vht |= flag; ieee80211_syncflag_vht_locked(ic, flag); IEEE80211_UNLOCK(ic); } /* * Synchronize flags_ext bit state in the com structure * according to the state of all vap's. This is used, * for example, to handle state changes via ioctls. */ static void ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag) { struct ieee80211vap *vap; int bit; IEEE80211_LOCK_ASSERT(ic); bit = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_flags_ext & flag) { bit = 1; break; } if (bit) ic->ic_flags_ext |= flag; else ic->ic_flags_ext &= ~flag; } void ieee80211_syncflag_ext(struct ieee80211vap *vap, int flag) { struct ieee80211com *ic = vap->iv_ic; IEEE80211_LOCK(ic); if (flag < 0) { flag = -flag; vap->iv_flags_ext &= ~flag; } else vap->iv_flags_ext |= flag; ieee80211_syncflag_ext_locked(ic, flag); IEEE80211_UNLOCK(ic); } static __inline int mapgsm(u_int freq, u_int flags) { freq *= 10; if (flags & IEEE80211_CHAN_QUARTER) freq += 5; else if (flags & IEEE80211_CHAN_HALF) freq += 10; else freq += 20; /* NB: there is no 907/20 wide but leave room */ return (freq - 906*10) / 5; } static __inline int mappsb(u_int freq, u_int flags) { return 37 + ((freq * 10) + ((freq % 5) == 2 ? 5 : 0) - 49400) / 5; } /* * Convert MHz frequency to IEEE channel number. */ int ieee80211_mhz2ieee(u_int freq, u_int flags) { #define IS_FREQ_IN_PSB(_freq) ((_freq) > 4940 && (_freq) < 4990) if (flags & IEEE80211_CHAN_GSM) return mapgsm(freq, flags); if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */ if (freq == 2484) return 14; if (freq < 2484) return ((int) freq - 2407) / 5; else return 15 + ((freq - 2512) / 20); } else if (flags & IEEE80211_CHAN_5GHZ) { /* 5Ghz band */ if (freq <= 5000) { /* XXX check regdomain? */ if (IS_FREQ_IN_PSB(freq)) return mappsb(freq, flags); return (freq - 4000) / 5; } else return (freq - 5000) / 5; } else { /* either, guess */ if (freq == 2484) return 14; if (freq < 2484) { if (907 <= freq && freq <= 922) return mapgsm(freq, flags); return ((int) freq - 2407) / 5; } if (freq < 5000) { if (IS_FREQ_IN_PSB(freq)) return mappsb(freq, flags); else if (freq > 4900) return (freq - 4000) / 5; else return 15 + ((freq - 2512) / 20); } return (freq - 5000) / 5; } #undef IS_FREQ_IN_PSB } /* * Convert channel to IEEE channel number. 
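 */

/*
 * Worked examples, not part of the original source, of the mapping
 * implemented by ieee80211_mhz2ieee() above for the common bands:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	/* 2 GHz: chan = (freq - 2407) / 5, with 2484 MHz -> 14 special */
	printf("2412 MHz -> %d\n", (2412 - 2407) / 5);	/* channel 1 */
	printf("2472 MHz -> %d\n", (2472 - 2407) / 5);	/* channel 13 */
	/* 5 GHz: chan = (freq - 5000) / 5 */
	printf("5180 MHz -> %d\n", (5180 - 5000) / 5);	/* channel 36 */
	printf("5825 MHz -> %d\n", (5825 - 5000) / 5);	/* channel 165 */
	return (0);
}
#endif
/*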
*/ int ieee80211_chan2ieee(struct ieee80211com *ic, const struct ieee80211_channel *c) { if (c == NULL) { ic_printf(ic, "invalid channel (NULL)\n"); return 0; /* XXX */ } return (c == IEEE80211_CHAN_ANYC ? IEEE80211_CHAN_ANY : c->ic_ieee); } /* * Convert IEEE channel number to MHz frequency. */ u_int ieee80211_ieee2mhz(u_int chan, u_int flags) { if (flags & IEEE80211_CHAN_GSM) return 907 + 5 * (chan / 10); if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */ if (chan == 14) return 2484; if (chan < 14) return 2407 + chan*5; else return 2512 + ((chan-15)*20); } else if (flags & IEEE80211_CHAN_5GHZ) {/* 5Ghz band */ if (flags & (IEEE80211_CHAN_HALF|IEEE80211_CHAN_QUARTER)) { chan -= 37; return 4940 + chan*5 + (chan % 5 ? 2 : 0); } return 5000 + (chan*5); } else { /* either, guess */ /* XXX can't distinguish PSB+GSM channels */ if (chan == 14) return 2484; if (chan < 14) /* 0-13 */ return 2407 + chan*5; if (chan < 27) /* 15-26 */ return 2512 + ((chan-15)*20); return 5000 + (chan*5); } } static __inline void set_extchan(struct ieee80211_channel *c) { /* * IEEE Std 802.11-2012, page 1738, subclause 20.3.15.4: * "the secondary channel number shall be 'N + [1,-1] * 4' */ if (c->ic_flags & IEEE80211_CHAN_HT40U) c->ic_extieee = c->ic_ieee + 4; else if (c->ic_flags & IEEE80211_CHAN_HT40D) c->ic_extieee = c->ic_ieee - 4; else c->ic_extieee = 0; } /* * Populate the freq1/freq2 fields as appropriate for VHT channels. * * This for now uses a hard-coded list of 80MHz wide channels. * * For HT20/HT40, freq1 just is the centre frequency of the 40MHz * wide channel we've already decided upon. * * For VHT80 and VHT160, there are only a small number of fixed * 80/160MHz wide channels, so we just use those. * * This is all likely very very wrong - both the regulatory code * and this code needs to ensure that all four channels are * available and valid before the VHT80 (and eight for VHT160) channel * is created. */ struct vht_chan_range { uint16_t freq_start; uint16_t freq_end; }; struct vht_chan_range vht80_chan_ranges[] = { { 5170, 5250 }, { 5250, 5330 }, { 5490, 5570 }, { 5570, 5650 }, { 5650, 5730 }, { 5735, 5815 }, { 0, 0, } }; static int set_vht_extchan(struct ieee80211_channel *c) { int i; if (! IEEE80211_IS_CHAN_VHT(c)) { return (0); } if (IEEE80211_IS_CHAN_VHT20(c)) { c->ic_vht_ch_freq1 = c->ic_ieee; return (1); } if (IEEE80211_IS_CHAN_VHT40(c)) { if (IEEE80211_IS_CHAN_HT40U(c)) c->ic_vht_ch_freq1 = c->ic_ieee + 2; else if (IEEE80211_IS_CHAN_HT40D(c)) c->ic_vht_ch_freq1 = c->ic_ieee - 2; else return (0); return (1); } if (IEEE80211_IS_CHAN_VHT80(c)) { for (i = 0; vht80_chan_ranges[i].freq_start != 0; i++) { if (c->ic_freq >= vht80_chan_ranges[i].freq_start && c->ic_freq < vht80_chan_ranges[i].freq_end) { int midpoint; midpoint = vht80_chan_ranges[i].freq_start + 40; c->ic_vht_ch_freq1 = ieee80211_mhz2ieee(midpoint, c->ic_flags); c->ic_vht_ch_freq2 = 0; #if 0 printf("%s: %d, freq=%d, midpoint=%d, freq1=%d, freq2=%d\n", __func__, c->ic_ieee, c->ic_freq, midpoint, c->ic_vht_ch_freq1, c->ic_vht_ch_freq2); #endif return (1); } } return (0); } printf("%s: unknown VHT channel type (ieee=%d, flags=0x%08x)\n", __func__, c->ic_ieee, c->ic_flags); return (0); } /* * Return whether the current channel could possibly be a part of * a VHT80 channel. * * This doesn't check that the whole range is in the allowed list * according to regulatory. 
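 */

/*
 * Worked example, not part of the original source, of the VHT80
 * centre-channel computation in set_vht_extchan() above: any 20 MHz
 * primary inside a vht80_chan_ranges entry maps to the centre of that
 * 80 MHz block.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int freq_start = 5170;		/* block holding channels 36-48 */
	int midpoint = freq_start + 40;	/* 5210 MHz */

	/* same 5 GHz mapping that ieee80211_mhz2ieee() applies */
	printf("freq1 = channel %d\n", (midpoint - 5000) / 5);	/* 42 */
	return (0);
}
#endif
/*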
*/ static int is_vht80_valid_freq(uint16_t freq) { int i; for (i = 0; vht80_chan_ranges[i].freq_start != 0; i++) { if (freq >= vht80_chan_ranges[i].freq_start && freq < vht80_chan_ranges[i].freq_end) return (1); } return (0); } static int addchan(struct ieee80211_channel chans[], int maxchans, int *nchans, uint8_t ieee, uint16_t freq, int8_t maxregpower, uint32_t flags) { struct ieee80211_channel *c; if (*nchans >= maxchans) return (ENOBUFS); #if 0 printf("%s: %d: ieee=%d, freq=%d, flags=0x%08x\n", __func__, *nchans, ieee, freq, flags); #endif c = &chans[(*nchans)++]; c->ic_ieee = ieee; c->ic_freq = freq != 0 ? freq : ieee80211_ieee2mhz(ieee, flags); c->ic_maxregpower = maxregpower; c->ic_maxpower = 2 * maxregpower; c->ic_flags = flags; c->ic_vht_ch_freq1 = 0; c->ic_vht_ch_freq2 = 0; set_extchan(c); set_vht_extchan(c); return (0); } static int copychan_prev(struct ieee80211_channel chans[], int maxchans, int *nchans, uint32_t flags) { struct ieee80211_channel *c; KASSERT(*nchans > 0, ("channel list is empty\n")); if (*nchans >= maxchans) return (ENOBUFS); #if 0 printf("%s: %d: flags=0x%08x\n", __func__, *nchans, flags); #endif c = &chans[(*nchans)++]; c[0] = c[-1]; c->ic_flags = flags; c->ic_vht_ch_freq1 = 0; c->ic_vht_ch_freq2 = 0; set_extchan(c); set_vht_extchan(c); return (0); } /* * XXX VHT-2GHz */ static void getflags_2ghz(const uint8_t bands[], uint32_t flags[], int ht40) { int nmodes; nmodes = 0; if (isset(bands, IEEE80211_MODE_11B)) flags[nmodes++] = IEEE80211_CHAN_B; if (isset(bands, IEEE80211_MODE_11G)) flags[nmodes++] = IEEE80211_CHAN_G; if (isset(bands, IEEE80211_MODE_11NG)) flags[nmodes++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT20; if (ht40) { flags[nmodes++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT40U; flags[nmodes++] = IEEE80211_CHAN_G | IEEE80211_CHAN_HT40D; } flags[nmodes] = 0; } static void getflags_5ghz(const uint8_t bands[], uint32_t flags[], int ht40, int vht80) { int nmodes; /* * the addchan_list function seems to expect the flags array to * be in channel width order, so the VHT bits are interspersed * as appropriate to maintain said order. * * It also assumes HT40U is before HT40D. 
*/ nmodes = 0; /* 20MHz */ if (isset(bands, IEEE80211_MODE_11A)) flags[nmodes++] = IEEE80211_CHAN_A; if (isset(bands, IEEE80211_MODE_11NA)) flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT20; if (isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT20 | IEEE80211_CHAN_VHT20; + } /* 40MHz */ if (ht40) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U; } if (ht40 && isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U | IEEE80211_CHAN_VHT40U; } if (ht40) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D; } if (ht40 && isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D | IEEE80211_CHAN_VHT40D; } /* 80MHz */ if (vht80 && isset(bands, IEEE80211_MODE_VHT_5GHZ)) { flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U | IEEE80211_CHAN_VHT80; flags[nmodes++] = IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D | IEEE80211_CHAN_VHT80; - } } /* XXX VHT80+80 */ /* XXX VHT160 */ flags[nmodes] = 0; } static void getflags(const uint8_t bands[], uint32_t flags[], int ht40, int vht80) { flags[0] = 0; if (isset(bands, IEEE80211_MODE_11A) || isset(bands, IEEE80211_MODE_11NA) || isset(bands, IEEE80211_MODE_VHT_5GHZ)) { if (isset(bands, IEEE80211_MODE_11B) || isset(bands, IEEE80211_MODE_11G) || isset(bands, IEEE80211_MODE_11NG) || isset(bands, IEEE80211_MODE_VHT_2GHZ)) return; getflags_5ghz(bands, flags, ht40, vht80); } else getflags_2ghz(bands, flags, ht40); } /* * Add one 20 MHz channel into specified channel list. */ /* XXX VHT */ int ieee80211_add_channel(struct ieee80211_channel chans[], int maxchans, int *nchans, uint8_t ieee, uint16_t freq, int8_t maxregpower, uint32_t chan_flags, const uint8_t bands[]) { uint32_t flags[IEEE80211_MODE_MAX]; int i, error; getflags(bands, flags, 0, 0); KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__)); error = addchan(chans, maxchans, nchans, ieee, freq, maxregpower, flags[0] | chan_flags); for (i = 1; flags[i] != 0 && error == 0; i++) { error = copychan_prev(chans, maxchans, nchans, flags[i] | chan_flags); } return (error); } static struct ieee80211_channel * findchannel(struct ieee80211_channel chans[], int nchans, uint16_t freq, uint32_t flags) { struct ieee80211_channel *c; int i; flags &= IEEE80211_CHAN_ALLTURBO; /* brute force search */ for (i = 0; i < nchans; i++) { c = &chans[i]; if (c->ic_freq == freq && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; } return NULL; } /* * Add 40 MHz channel pair into specified channel list. */ /* XXX VHT */ int ieee80211_add_channel_ht40(struct ieee80211_channel chans[], int maxchans, int *nchans, uint8_t ieee, int8_t maxregpower, uint32_t flags) { struct ieee80211_channel *cent, *extc; uint16_t freq; int error; freq = ieee80211_ieee2mhz(ieee, flags); /* * Each entry defines an HT40 channel pair; find the * center channel, then the extension channel above. */ flags |= IEEE80211_CHAN_HT20; cent = findchannel(chans, *nchans, freq, flags); if (cent == NULL) return (EINVAL); extc = findchannel(chans, *nchans, freq + 20, flags); if (extc == NULL) return (ENOENT); flags &= ~IEEE80211_CHAN_HT; error = addchan(chans, maxchans, nchans, cent->ic_ieee, cent->ic_freq, maxregpower, flags | IEEE80211_CHAN_HT40U); if (error != 0) return (error); error = addchan(chans, maxchans, nchans, extc->ic_ieee, extc->ic_freq, maxregpower, flags | IEEE80211_CHAN_HT40D); return (error); } /* * Fetch the center frequency for the primary channel. 
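 */

/*
 * Schematic sketch, not from the original source, of a driver filling
 * its channel table with ieee80211_add_channel(); the function name
 * and channel values are hypothetical.
 */
#if 0
static void
mydrv_init_channels(struct ieee80211com *ic)
{
	uint8_t bands[IEEE80211_MODE_BYTES];
	int nchans = 0;

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	/* one 20 MHz channel: IEEE 1, 2412 MHz, 30 dBm regulatory max */
	ieee80211_add_channel(ic->ic_channels, IEEE80211_CHAN_MAX,
	    &nchans, 1, 2412, 30, 0, bands);
	ic->ic_nchans = nchans;
}
#endif
/*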
*/ uint32_t ieee80211_get_channel_center_freq(const struct ieee80211_channel *c) { return (c->ic_freq); } /* * Fetch the center frequency for the primary BAND channel. * * For 5, 10, 20MHz channels it'll be the normally configured channel * frequency. * * For 40MHz, 80MHz, 160MHz channels it'll be the centre of the * wide channel, not the centre of the primary channel (that's ic_freq). * * For 80+80MHz channels this will be the centre of the primary * 80MHz channel; the secondary 80MHz channel will be center_freq2(). */ uint32_t ieee80211_get_channel_center_freq1(const struct ieee80211_channel *c) { /* * VHT - use the pre-calculated centre frequency * of the given channel. */ if (IEEE80211_IS_CHAN_VHT(c)) return (ieee80211_ieee2mhz(c->ic_vht_ch_freq1, c->ic_flags)); if (IEEE80211_IS_CHAN_HT40U(c)) { return (c->ic_freq + 10); } if (IEEE80211_IS_CHAN_HT40D(c)) { return (c->ic_freq - 10); } return (c->ic_freq); } /* * For now, no 80+80 support; it will likely always return 0. */ uint32_t ieee80211_get_channel_center_freq2(const struct ieee80211_channel *c) { if (IEEE80211_IS_CHAN_VHT(c) && (c->ic_vht_ch_freq2 != 0)) return (ieee80211_ieee2mhz(c->ic_vht_ch_freq2, c->ic_flags)); return (0); } /* * Add channels to the specified channel list (the ieee[] array must be sorted). */ static int add_chanlist(struct ieee80211_channel chans[], int maxchans, int *nchans, const uint8_t ieee[], int nieee, uint32_t flags[]) { uint16_t freq; int i, j, error; int is_vht; for (i = 0; i < nieee; i++) { freq = ieee80211_ieee2mhz(ieee[i], flags[0]); for (j = 0; flags[j] != 0; j++) { /* * Notes: * + HT40 and VHT40 channels occur together, so * we need to be careful that we actually allow that. * + VHT80, VHT160 will coexist with HT40/VHT40, so * make sure it's not skipped because of the overlap * check used for (V)HT40. */ is_vht = !! (flags[j] & IEEE80211_CHAN_VHT); /* * Test for VHT80. * XXX This is all very broken right now. * What we /should/ do is: * * + check that the frequency is in the list of * allowed VHT80 ranges; and * + the other 3 channels in the list are actually * also available. */ if (is_vht && flags[j] & IEEE80211_CHAN_VHT80) if (! is_vht80_valid_freq(freq)) continue; /* * Test for (V)HT40. * * This is also a fall-through from VHT80, as we only * allow a VHT80 channel if the VHT40 combination is * also valid. If the VHT40 form is not valid then * we certainly can't do VHT80. */ if (flags[j] & IEEE80211_CHAN_HT40D) /* * Can't have a "lower" channel if we are the * first channel. * * Can't have a "lower" channel if it's below/ * within 20MHz of the first channel. * * Can't have a "lower" channel if the channel * below it is not 20MHz away. */ if (i == 0 || ieee[i] < ieee[0] + 4 || freq - 20 != ieee80211_ieee2mhz(ieee[i] - 4, flags[j])) continue; if (flags[j] & IEEE80211_CHAN_HT40U) /* * Can't have an "upper" channel if we are * the last channel. * * Can't have an "upper" channel be above the * last channel in the list. * * Can't have an "upper" channel if the next * channel according to the math isn't 20MHz * away. (Likely for channel 13/14.)
*/ if (i == nieee - 1 || ieee[i] + 4 > ieee[nieee - 1] || freq + 20 != ieee80211_ieee2mhz(ieee[i] + 4, flags[j])) continue; if (j == 0) { error = addchan(chans, maxchans, nchans, ieee[i], freq, 0, flags[j]); } else { error = copychan_prev(chans, maxchans, nchans, flags[j]); } if (error != 0) return (error); } } return (0); } int ieee80211_add_channel_list_2ghz(struct ieee80211_channel chans[], int maxchans, int *nchans, const uint8_t ieee[], int nieee, const uint8_t bands[], int ht40) { uint32_t flags[IEEE80211_MODE_MAX]; /* XXX no VHT for now */ getflags_2ghz(bands, flags, ht40); KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__)); return (add_chanlist(chans, maxchans, nchans, ieee, nieee, flags)); } int ieee80211_add_channel_list_5ghz(struct ieee80211_channel chans[], int maxchans, int *nchans, const uint8_t ieee[], int nieee, const uint8_t bands[], int ht40) { uint32_t flags[IEEE80211_MODE_MAX]; int vht80 = 0; /* * For now, assume VHT == VHT80 support as a minimum. */ if (isset(bands, IEEE80211_MODE_VHT_5GHZ)) vht80 = 1; getflags_5ghz(bands, flags, ht40, vht80); KASSERT(flags[0] != 0, ("%s: no correct mode provided\n", __func__)); return (add_chanlist(chans, maxchans, nchans, ieee, nieee, flags)); } /* * Locate a channel given a frequency+flags. We cache * the previous lookup to optimize switching between two * channels--as happens with dynamic turbo. */ struct ieee80211_channel * ieee80211_find_channel(struct ieee80211com *ic, int freq, int flags) { struct ieee80211_channel *c; flags &= IEEE80211_CHAN_ALLTURBO; c = ic->ic_prevchan; if (c != NULL && c->ic_freq == freq && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; /* brute force search */ return (findchannel(ic->ic_channels, ic->ic_nchans, freq, flags)); } /* * Locate a channel given a channel number+flags. We cache * the previous lookup to optimize switching between two * channels--as happens with dynamic turbo. */ struct ieee80211_channel * ieee80211_find_channel_byieee(struct ieee80211com *ic, int ieee, int flags) { struct ieee80211_channel *c; int i; flags &= IEEE80211_CHAN_ALLTURBO; c = ic->ic_prevchan; if (c != NULL && c->ic_ieee == ieee && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; /* brute force search */ for (i = 0; i < ic->ic_nchans; i++) { c = &ic->ic_channels[i]; if (c->ic_ieee == ieee && (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags) return c; } return NULL; } /* * Lookup a channel suitable for the given rx status. * * This is used to find a channel for a frame (eg beacon, probe * response) based purely on the received PHY information. * * For now it tries to do it based on R_FREQ / R_IEEE. * This is enough for 11bg and 11a (and thus 11ng/11na) * but it will not be enough for GSM, PSB channels and the * like. It also doesn't know about legacy-turbog and * legacy-turbo modes, which some offload NICs actually * support in weird ways. * * Takes the ic and rxstatus; returns the channel or NULL * if not found. * * XXX TODO: Add support for that when the need arises. */ struct ieee80211_channel * ieee80211_lookup_channel_rxstatus(struct ieee80211vap *vap, const struct ieee80211_rx_stats *rxs) { struct ieee80211com *ic = vap->iv_ic; uint32_t flags; struct ieee80211_channel *c; if (rxs == NULL) return (NULL); /* * Strictly speaking we only use freq for now, * however later on we may wish to just store * the ieee for verification. 
*/ if ((rxs->r_flags & IEEE80211_R_FREQ) == 0) return (NULL); if ((rxs->r_flags & IEEE80211_R_IEEE) == 0) return (NULL); /* * If the rx status contains a valid ieee/freq, then * ensure we populate the correct channel information * in rxchan before passing it up to the scan infrastructure. * Offload NICs will pass up beacons from all channels * during background scans. */ /* Determine a band */ /* XXX should be done by the driver? */ if (rxs->c_freq < 3000) { flags = IEEE80211_CHAN_G; } else { flags = IEEE80211_CHAN_A; } /* Channel lookup */ c = ieee80211_find_channel(ic, rxs->c_freq, flags); IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT, "%s: freq=%d, ieee=%d, flags=0x%08x; c=%p\n", __func__, (int) rxs->c_freq, (int) rxs->c_ieee, flags, c); return (c); } static void addmedia(struct ifmedia *media, int caps, int addsta, int mode, int mword) { #define ADD(_ic, _s, _o) \ ifmedia_add(media, \ IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL) static const u_int mopts[IEEE80211_MODE_MAX] = { [IEEE80211_MODE_AUTO] = IFM_AUTO, [IEEE80211_MODE_11A] = IFM_IEEE80211_11A, [IEEE80211_MODE_11B] = IFM_IEEE80211_11B, [IEEE80211_MODE_11G] = IFM_IEEE80211_11G, [IEEE80211_MODE_FH] = IFM_IEEE80211_FH, [IEEE80211_MODE_TURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO, [IEEE80211_MODE_TURBO_G] = IFM_IEEE80211_11G|IFM_IEEE80211_TURBO, [IEEE80211_MODE_STURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO, [IEEE80211_MODE_HALF] = IFM_IEEE80211_11A, /* XXX */ [IEEE80211_MODE_QUARTER] = IFM_IEEE80211_11A, /* XXX */ [IEEE80211_MODE_11NA] = IFM_IEEE80211_11NA, [IEEE80211_MODE_11NG] = IFM_IEEE80211_11NG, [IEEE80211_MODE_VHT_2GHZ] = IFM_IEEE80211_VHT2G, [IEEE80211_MODE_VHT_5GHZ] = IFM_IEEE80211_VHT5G, }; u_int mopt; mopt = mopts[mode]; if (addsta) ADD(ic, mword, mopt); /* STA mode has no cap */ if (caps & IEEE80211_C_IBSS) ADD(media, mword, mopt | IFM_IEEE80211_ADHOC); if (caps & IEEE80211_C_HOSTAP) ADD(media, mword, mopt | IFM_IEEE80211_HOSTAP); if (caps & IEEE80211_C_AHDEMO) ADD(media, mword, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0); if (caps & IEEE80211_C_MONITOR) ADD(media, mword, mopt | IFM_IEEE80211_MONITOR); if (caps & IEEE80211_C_WDS) ADD(media, mword, mopt | IFM_IEEE80211_WDS); if (caps & IEEE80211_C_MBSS) ADD(media, mword, mopt | IFM_IEEE80211_MBSS); #undef ADD } /* * Setup the media data structures according to the channel and * rate tables. */ static int ieee80211_media_setup(struct ieee80211com *ic, struct ifmedia *media, int caps, int addsta, ifm_change_cb_t media_change, ifm_stat_cb_t media_stat) { int i, j, rate, maxrate, mword, r; enum ieee80211_phymode mode; const struct ieee80211_rateset *rs; struct ieee80211_rateset allrates; /* * Fill in media characteristics. */ ifmedia_init(media, 0, media_change, media_stat); maxrate = 0; /* * Add media for legacy operating modes. */ memset(&allrates, 0, sizeof(allrates)); for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_11NA; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; addmedia(media, caps, addsta, mode, IFM_AUTO); if (mode == IEEE80211_MODE_AUTO) continue; rs = &ic->ic_sup_rates[mode]; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; mword = ieee80211_rate2media(ic, rate, mode); if (mword == 0) continue; addmedia(media, caps, addsta, mode, mword); /* * Add legacy rate to the collection of all rates. 
*/ r = rate & IEEE80211_RATE_VAL; for (j = 0; j < allrates.rs_nrates; j++) if (allrates.rs_rates[j] == r) break; if (j == allrates.rs_nrates) { /* unique, add to the set */ allrates.rs_rates[j] = r; allrates.rs_nrates++; } rate = (rate & IEEE80211_RATE_VAL) / 2; if (rate > maxrate) maxrate = rate; } } for (i = 0; i < allrates.rs_nrates; i++) { mword = ieee80211_rate2media(ic, allrates.rs_rates[i], IEEE80211_MODE_AUTO); if (mword == 0) continue; /* NB: remove media options from mword */ addmedia(media, caps, addsta, IEEE80211_MODE_AUTO, IFM_SUBTYPE(mword)); } /* * Add HT/11n media. Note that we do not have enough * bits in the media subtype to express the MCS so we * use a "placeholder" media subtype and any fixed MCS * must be specified with a different mechanism. */ for (; mode <= IEEE80211_MODE_11NG; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; addmedia(media, caps, addsta, mode, IFM_AUTO); addmedia(media, caps, addsta, mode, IFM_IEEE80211_MCS); } if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA) || isset(ic->ic_modecaps, IEEE80211_MODE_11NG)) { addmedia(media, caps, addsta, IEEE80211_MODE_AUTO, IFM_IEEE80211_MCS); i = ic->ic_txstream * 8 - 1; if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) && (ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40)) rate = ieee80211_htrates[i].ht40_rate_400ns; else if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40)) rate = ieee80211_htrates[i].ht40_rate_800ns; else if ((ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20)) rate = ieee80211_htrates[i].ht20_rate_400ns; else rate = ieee80211_htrates[i].ht20_rate_800ns; if (rate > maxrate) maxrate = rate; } /* * Add VHT media. */ for (; mode <= IEEE80211_MODE_VHT_5GHZ; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; addmedia(media, caps, addsta, mode, IFM_AUTO); addmedia(media, caps, addsta, mode, IFM_IEEE80211_VHT); /* XXX TODO: VHT maxrate */ } return maxrate; } /* XXX inline or eliminate? */ const struct ieee80211_rateset * ieee80211_get_suprates(struct ieee80211com *ic, const struct ieee80211_channel *c) { /* XXX does this work for 11ng basic rates? */ return &ic->ic_sup_rates[ieee80211_chan2mode(c)]; } void ieee80211_announce(struct ieee80211com *ic) { int i, rate, mword; enum ieee80211_phymode mode; const struct ieee80211_rateset *rs; /* NB: skip AUTO since it has no rates */ for (mode = IEEE80211_MODE_AUTO+1; mode < IEEE80211_MODE_11NA; mode++) { if (isclr(ic->ic_modecaps, mode)) continue; ic_printf(ic, "%s rates: ", ieee80211_phymode_name[mode]); rs = &ic->ic_sup_rates[mode]; for (i = 0; i < rs->rs_nrates; i++) { mword = ieee80211_rate2media(ic, rs->rs_rates[i], mode); if (mword == 0) continue; rate = ieee80211_media2rate(mword); printf("%s%d%sMbps", (i != 0 ? " " : ""), rate / 2, ((rate & 0x1) != 0 ? 
".5" : "")); } printf("\n"); } ieee80211_ht_announce(ic); ieee80211_vht_announce(ic); } void ieee80211_announce_channels(struct ieee80211com *ic) { const struct ieee80211_channel *c; char type; int i, cw; printf("Chan Freq CW RegPwr MinPwr MaxPwr\n"); for (i = 0; i < ic->ic_nchans; i++) { c = &ic->ic_channels[i]; if (IEEE80211_IS_CHAN_ST(c)) type = 'S'; else if (IEEE80211_IS_CHAN_108A(c)) type = 'T'; else if (IEEE80211_IS_CHAN_108G(c)) type = 'G'; else if (IEEE80211_IS_CHAN_HT(c)) type = 'n'; else if (IEEE80211_IS_CHAN_A(c)) type = 'a'; else if (IEEE80211_IS_CHAN_ANYG(c)) type = 'g'; else if (IEEE80211_IS_CHAN_B(c)) type = 'b'; else type = 'f'; if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_TURBO(c)) cw = 40; else if (IEEE80211_IS_CHAN_HALF(c)) cw = 10; else if (IEEE80211_IS_CHAN_QUARTER(c)) cw = 5; else cw = 20; printf("%4d %4d%c %2d%c %6d %4d.%d %4d.%d\n" , c->ic_ieee, c->ic_freq, type , cw , IEEE80211_IS_CHAN_HT40U(c) ? '+' : IEEE80211_IS_CHAN_HT40D(c) ? '-' : ' ' , c->ic_maxregpower , c->ic_minpower / 2, c->ic_minpower & 1 ? 5 : 0 , c->ic_maxpower / 2, c->ic_maxpower & 1 ? 5 : 0 ); } } static int media2mode(const struct ifmedia_entry *ime, uint32_t flags, uint16_t *mode) { switch (IFM_MODE(ime->ifm_media)) { case IFM_IEEE80211_11A: *mode = IEEE80211_MODE_11A; break; case IFM_IEEE80211_11B: *mode = IEEE80211_MODE_11B; break; case IFM_IEEE80211_11G: *mode = IEEE80211_MODE_11G; break; case IFM_IEEE80211_FH: *mode = IEEE80211_MODE_FH; break; case IFM_IEEE80211_11NA: *mode = IEEE80211_MODE_11NA; break; case IFM_IEEE80211_11NG: *mode = IEEE80211_MODE_11NG; break; case IFM_AUTO: *mode = IEEE80211_MODE_AUTO; break; default: return 0; } /* * Turbo mode is an ``option''. * XXX does not apply to AUTO */ if (ime->ifm_media & IFM_IEEE80211_TURBO) { if (*mode == IEEE80211_MODE_11A) { if (flags & IEEE80211_F_TURBOP) *mode = IEEE80211_MODE_TURBO_A; else *mode = IEEE80211_MODE_STURBO_A; } else if (*mode == IEEE80211_MODE_11G) *mode = IEEE80211_MODE_TURBO_G; else return 0; } /* XXX HT40 +/- */ return 1; } /* * Handle a media change request on the vap interface. */ int ieee80211_media_change(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; struct ifmedia_entry *ime = vap->iv_media.ifm_cur; uint16_t newmode; if (!media2mode(ime, vap->iv_flags, &newmode)) return EINVAL; if (vap->iv_des_mode != newmode) { vap->iv_des_mode = newmode; /* XXX kick state machine if up+running */ } return 0; } /* * Common code to calculate the media status word * from the operating mode and channel state. */ static int media_status(enum ieee80211_opmode opmode, const struct ieee80211_channel *chan) { int status; status = IFM_IEEE80211; switch (opmode) { case IEEE80211_M_STA: break; case IEEE80211_M_IBSS: status |= IFM_IEEE80211_ADHOC; break; case IEEE80211_M_HOSTAP: status |= IFM_IEEE80211_HOSTAP; break; case IEEE80211_M_MONITOR: status |= IFM_IEEE80211_MONITOR; break; case IEEE80211_M_AHDEMO: status |= IFM_IEEE80211_ADHOC | IFM_FLAG0; break; case IEEE80211_M_WDS: status |= IFM_IEEE80211_WDS; break; case IEEE80211_M_MBSS: status |= IFM_IEEE80211_MBSS; break; } if (IEEE80211_IS_CHAN_HTA(chan)) { status |= IFM_IEEE80211_11NA; } else if (IEEE80211_IS_CHAN_HTG(chan)) { status |= IFM_IEEE80211_11NG; } else if (IEEE80211_IS_CHAN_A(chan)) { status |= IFM_IEEE80211_11A; } else if (IEEE80211_IS_CHAN_B(chan)) { status |= IFM_IEEE80211_11B; } else if (IEEE80211_IS_CHAN_ANYG(chan)) { status |= IFM_IEEE80211_11G; } else if (IEEE80211_IS_CHAN_FHSS(chan)) { status |= IFM_IEEE80211_FH; } /* XXX else complain? 
*/ if (IEEE80211_IS_CHAN_TURBO(chan)) status |= IFM_IEEE80211_TURBO; #if 0 if (IEEE80211_IS_CHAN_HT20(chan)) status |= IFM_IEEE80211_HT20; if (IEEE80211_IS_CHAN_HT40(chan)) status |= IFM_IEEE80211_HT40; #endif return status; } void ieee80211_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; enum ieee80211_phymode mode; imr->ifm_status = IFM_AVALID; /* * NB: use the current channel's mode to lock down a transmit * rate only when running; otherwise we may have a mismatch * in which case the rate will not be convertible. */ if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) { imr->ifm_status |= IFM_ACTIVE; mode = ieee80211_chan2mode(ic->ic_curchan); } else mode = IEEE80211_MODE_AUTO; imr->ifm_active = media_status(vap->iv_opmode, ic->ic_curchan); /* * Calculate a current rate if possible. */ if (vap->iv_txparms[mode].ucastrate != IEEE80211_FIXED_RATE_NONE) { /* * A fixed rate is set, report that. */ imr->ifm_active |= ieee80211_rate2media(ic, vap->iv_txparms[mode].ucastrate, mode); } else if (vap->iv_opmode == IEEE80211_M_STA) { /* * In station mode report the current transmit rate. */ imr->ifm_active |= ieee80211_rate2media(ic, vap->iv_bss->ni_txrate, mode); } else imr->ifm_active |= IFM_AUTO; if (imr->ifm_status & IFM_ACTIVE) imr->ifm_current = imr->ifm_active; } /* * Set the current phy mode and recalculate the active channel * set based on the available channels for this mode. Also * select a new default/current channel if the current one is * inappropriate for this mode. */ int ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode) { /* * Adjust basic rates in 11b/11g supported rate set. * Note that if operating on a half/quarter rate channel * this is a no-op as those rate sets are different * and used instead. */ if (mode == IEEE80211_MODE_11G || mode == IEEE80211_MODE_11B) ieee80211_setbasicrates(&ic->ic_sup_rates[mode], mode); ic->ic_curmode = mode; ieee80211_reset_erp(ic); /* reset ERP state */ return 0; } /* * Return the phy mode for the specified channel.
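 */

/*
 * Illustrative model, not part of the original source, of the
 * precedence used by ieee80211_chan2mode() below: newer/wider PHY
 * flags are tested before the legacy ones, so an HT channel in the
 * 5 GHz band reports 11na rather than 11a.
 */
#if 0
#include <stdio.h>

#define EX_CHAN_5GHZ	0x01
#define EX_CHAN_HT20	0x02

static const char *
chan2mode(unsigned flags)
{
	if ((flags & (EX_CHAN_5GHZ | EX_CHAN_HT20)) ==
	    (EX_CHAN_5GHZ | EX_CHAN_HT20))
		return ("11na");	/* HT tested first */
	if (flags & EX_CHAN_5GHZ)
		return ("11a");		/* legacy fallback */
	return ("11b");
}

int
main(void)
{
	printf("%s\n", chan2mode(EX_CHAN_5GHZ | EX_CHAN_HT20));
	printf("%s\n", chan2mode(EX_CHAN_5GHZ));
	return (0);
}
#endif
/*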
*/ enum ieee80211_phymode ieee80211_chan2mode(const struct ieee80211_channel *chan) { if (IEEE80211_IS_CHAN_VHT_2GHZ(chan)) return IEEE80211_MODE_VHT_2GHZ; else if (IEEE80211_IS_CHAN_VHT_5GHZ(chan)) return IEEE80211_MODE_VHT_5GHZ; else if (IEEE80211_IS_CHAN_HTA(chan)) return IEEE80211_MODE_11NA; else if (IEEE80211_IS_CHAN_HTG(chan)) return IEEE80211_MODE_11NG; else if (IEEE80211_IS_CHAN_108G(chan)) return IEEE80211_MODE_TURBO_G; else if (IEEE80211_IS_CHAN_ST(chan)) return IEEE80211_MODE_STURBO_A; else if (IEEE80211_IS_CHAN_TURBO(chan)) return IEEE80211_MODE_TURBO_A; else if (IEEE80211_IS_CHAN_HALF(chan)) return IEEE80211_MODE_HALF; else if (IEEE80211_IS_CHAN_QUARTER(chan)) return IEEE80211_MODE_QUARTER; else if (IEEE80211_IS_CHAN_A(chan)) return IEEE80211_MODE_11A; else if (IEEE80211_IS_CHAN_ANYG(chan)) return IEEE80211_MODE_11G; else if (IEEE80211_IS_CHAN_B(chan)) return IEEE80211_MODE_11B; else if (IEEE80211_IS_CHAN_FHSS(chan)) return IEEE80211_MODE_FH; /* NB: should not get here */ printf("%s: cannot map channel to mode; freq %u flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); return IEEE80211_MODE_11B; } struct ratemedia { u_int match; /* rate + mode */ u_int media; /* if_media rate */ }; static int findmedia(const struct ratemedia rates[], int n, u_int match) { int i; for (i = 0; i < n; i++) if (rates[i].match == match) return rates[i].media; return IFM_AUTO; } /* * Convert IEEE80211 rate value to ifmedia subtype. * Rate is either a legacy rate in units of 0.5Mbps * or an MCS index. */ int ieee80211_rate2media(struct ieee80211com *ic, int rate, enum ieee80211_phymode mode) { static const struct ratemedia rates[] = { { 2 | IFM_IEEE80211_FH, IFM_IEEE80211_FH1 }, { 4 | IFM_IEEE80211_FH, IFM_IEEE80211_FH2 }, { 2 | IFM_IEEE80211_11B, IFM_IEEE80211_DS1 }, { 4 | IFM_IEEE80211_11B, IFM_IEEE80211_DS2 }, { 11 | IFM_IEEE80211_11B, IFM_IEEE80211_DS5 }, { 22 | IFM_IEEE80211_11B, IFM_IEEE80211_DS11 }, { 44 | IFM_IEEE80211_11B, IFM_IEEE80211_DS22 }, { 12 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM6 }, { 18 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM9 }, { 24 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM12 }, { 36 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM18 }, { 48 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM24 }, { 72 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM36 }, { 96 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM48 }, { 108 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM54 }, { 2 | IFM_IEEE80211_11G, IFM_IEEE80211_DS1 }, { 4 | IFM_IEEE80211_11G, IFM_IEEE80211_DS2 }, { 11 | IFM_IEEE80211_11G, IFM_IEEE80211_DS5 }, { 22 | IFM_IEEE80211_11G, IFM_IEEE80211_DS11 }, { 12 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM6 }, { 18 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM9 }, { 24 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM12 }, { 36 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM18 }, { 48 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM24 }, { 72 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM36 }, { 96 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM48 }, { 108 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM54 }, { 6 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM3 }, { 9 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM4 }, { 54 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM27 }, /* NB: OFDM72 doesn't really exist so we don't handle it */ }; static const struct ratemedia htrates[] = { { 0, IFM_IEEE80211_MCS }, { 1, IFM_IEEE80211_MCS }, { 2, IFM_IEEE80211_MCS }, { 3, IFM_IEEE80211_MCS }, { 4, IFM_IEEE80211_MCS }, { 5, IFM_IEEE80211_MCS }, { 6, IFM_IEEE80211_MCS }, { 7, IFM_IEEE80211_MCS }, { 8, IFM_IEEE80211_MCS }, { 9, IFM_IEEE80211_MCS }, { 10, IFM_IEEE80211_MCS }, { 11, IFM_IEEE80211_MCS }, { 12, 
IFM_IEEE80211_MCS }, { 13, IFM_IEEE80211_MCS }, { 14, IFM_IEEE80211_MCS }, { 15, IFM_IEEE80211_MCS }, { 16, IFM_IEEE80211_MCS }, { 17, IFM_IEEE80211_MCS }, { 18, IFM_IEEE80211_MCS }, { 19, IFM_IEEE80211_MCS }, { 20, IFM_IEEE80211_MCS }, { 21, IFM_IEEE80211_MCS }, { 22, IFM_IEEE80211_MCS }, { 23, IFM_IEEE80211_MCS }, { 24, IFM_IEEE80211_MCS }, { 25, IFM_IEEE80211_MCS }, { 26, IFM_IEEE80211_MCS }, { 27, IFM_IEEE80211_MCS }, { 28, IFM_IEEE80211_MCS }, { 29, IFM_IEEE80211_MCS }, { 30, IFM_IEEE80211_MCS }, { 31, IFM_IEEE80211_MCS }, { 32, IFM_IEEE80211_MCS }, { 33, IFM_IEEE80211_MCS }, { 34, IFM_IEEE80211_MCS }, { 35, IFM_IEEE80211_MCS }, { 36, IFM_IEEE80211_MCS }, { 37, IFM_IEEE80211_MCS }, { 38, IFM_IEEE80211_MCS }, { 39, IFM_IEEE80211_MCS }, { 40, IFM_IEEE80211_MCS }, { 41, IFM_IEEE80211_MCS }, { 42, IFM_IEEE80211_MCS }, { 43, IFM_IEEE80211_MCS }, { 44, IFM_IEEE80211_MCS }, { 45, IFM_IEEE80211_MCS }, { 46, IFM_IEEE80211_MCS }, { 47, IFM_IEEE80211_MCS }, { 48, IFM_IEEE80211_MCS }, { 49, IFM_IEEE80211_MCS }, { 50, IFM_IEEE80211_MCS }, { 51, IFM_IEEE80211_MCS }, { 52, IFM_IEEE80211_MCS }, { 53, IFM_IEEE80211_MCS }, { 54, IFM_IEEE80211_MCS }, { 55, IFM_IEEE80211_MCS }, { 56, IFM_IEEE80211_MCS }, { 57, IFM_IEEE80211_MCS }, { 58, IFM_IEEE80211_MCS }, { 59, IFM_IEEE80211_MCS }, { 60, IFM_IEEE80211_MCS }, { 61, IFM_IEEE80211_MCS }, { 62, IFM_IEEE80211_MCS }, { 63, IFM_IEEE80211_MCS }, { 64, IFM_IEEE80211_MCS }, { 65, IFM_IEEE80211_MCS }, { 66, IFM_IEEE80211_MCS }, { 67, IFM_IEEE80211_MCS }, { 68, IFM_IEEE80211_MCS }, { 69, IFM_IEEE80211_MCS }, { 70, IFM_IEEE80211_MCS }, { 71, IFM_IEEE80211_MCS }, { 72, IFM_IEEE80211_MCS }, { 73, IFM_IEEE80211_MCS }, { 74, IFM_IEEE80211_MCS }, { 75, IFM_IEEE80211_MCS }, { 76, IFM_IEEE80211_MCS }, }; int m; /* * Check 11n rates first for match as an MCS. */ if (mode == IEEE80211_MODE_11NA) { if (rate & IEEE80211_RATE_MCS) { rate &= ~IEEE80211_RATE_MCS; m = findmedia(htrates, nitems(htrates), rate); if (m != IFM_AUTO) return m | IFM_IEEE80211_11NA; } } else if (mode == IEEE80211_MODE_11NG) { /* NB: 12 is ambiguous, it will be treated as an MCS */ if (rate & IEEE80211_RATE_MCS) { rate &= ~IEEE80211_RATE_MCS; m = findmedia(htrates, nitems(htrates), rate); if (m != IFM_AUTO) return m | IFM_IEEE80211_11NG; } } rate &= IEEE80211_RATE_VAL; switch (mode) { case IEEE80211_MODE_11A: case IEEE80211_MODE_HALF: /* XXX good 'nuf */ case IEEE80211_MODE_QUARTER: case IEEE80211_MODE_11NA: case IEEE80211_MODE_TURBO_A: case IEEE80211_MODE_STURBO_A: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11A); case IEEE80211_MODE_11B: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11B); case IEEE80211_MODE_FH: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_FH); case IEEE80211_MODE_AUTO: /* NB: ic may be NULL for some drivers */ if (ic != NULL && ic->ic_phytype == IEEE80211_T_FH) return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_FH); /* NB: hack, 11g matches both 11b+11a rates */ /* fall thru... 
*/ case IEEE80211_MODE_11G: case IEEE80211_MODE_11NG: case IEEE80211_MODE_TURBO_G: return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11G); case IEEE80211_MODE_VHT_2GHZ: case IEEE80211_MODE_VHT_5GHZ: /* XXX TODO: need to figure out mapping for VHT rates */ return IFM_AUTO; } return IFM_AUTO; } int ieee80211_media2rate(int mword) { static const int ieeerates[] = { -1, /* IFM_AUTO */ 0, /* IFM_MANUAL */ 0, /* IFM_NONE */ 2, /* IFM_IEEE80211_FH1 */ 4, /* IFM_IEEE80211_FH2 */ 2, /* IFM_IEEE80211_DS1 */ 4, /* IFM_IEEE80211_DS2 */ 11, /* IFM_IEEE80211_DS5 */ 22, /* IFM_IEEE80211_DS11 */ 44, /* IFM_IEEE80211_DS22 */ 12, /* IFM_IEEE80211_OFDM6 */ 18, /* IFM_IEEE80211_OFDM9 */ 24, /* IFM_IEEE80211_OFDM12 */ 36, /* IFM_IEEE80211_OFDM18 */ 48, /* IFM_IEEE80211_OFDM24 */ 72, /* IFM_IEEE80211_OFDM36 */ 96, /* IFM_IEEE80211_OFDM48 */ 108, /* IFM_IEEE80211_OFDM54 */ 144, /* IFM_IEEE80211_OFDM72 */ 0, /* IFM_IEEE80211_DS354k */ 0, /* IFM_IEEE80211_DS512k */ 6, /* IFM_IEEE80211_OFDM3 */ 9, /* IFM_IEEE80211_OFDM4 */ 54, /* IFM_IEEE80211_OFDM27 */ -1, /* IFM_IEEE80211_MCS */ -1, /* IFM_IEEE80211_VHT */ }; return IFM_SUBTYPE(mword) < nitems(ieeerates) ? ieeerates[IFM_SUBTYPE(mword)] : 0; } /* * The following hash function is adapted from "Hash Functions" by Bob Jenkins * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). */ #define mix(a, b, c) \ do { \ a -= b; a -= c; a ^= (c >> 13); \ b -= c; b -= a; b ^= (a << 8); \ c -= a; c -= b; c ^= (b >> 13); \ a -= b; a -= c; a ^= (c >> 12); \ b -= c; b -= a; b ^= (a << 16); \ c -= a; c -= b; c ^= (b >> 5); \ a -= b; a -= c; a ^= (c >> 3); \ b -= c; b -= a; b ^= (a << 10); \ c -= a; c -= b; c ^= (b >> 15); \ } while (/*CONSTCOND*/0) uint32_t ieee80211_mac_hash(const struct ieee80211com *ic, const uint8_t addr[IEEE80211_ADDR_LEN]) { uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = ic->ic_hash_key; b += addr[5] << 8; b += addr[4]; a += addr[3] << 24; a += addr[2] << 16; a += addr[1] << 8; a += addr[0]; mix(a, b, c); return c; } #undef mix char ieee80211_channel_type_char(const struct ieee80211_channel *c) { if (IEEE80211_IS_CHAN_ST(c)) return 'S'; if (IEEE80211_IS_CHAN_108A(c)) return 'T'; if (IEEE80211_IS_CHAN_108G(c)) return 'G'; if (IEEE80211_IS_CHAN_VHT(c)) return 'v'; if (IEEE80211_IS_CHAN_HT(c)) return 'n'; if (IEEE80211_IS_CHAN_A(c)) return 'a'; if (IEEE80211_IS_CHAN_ANYG(c)) return 'g'; if (IEEE80211_IS_CHAN_B(c)) return 'b'; return 'f'; } Index: projects/clang400-import/sys/powerpc/powerpc/db_trace.c =================================================================== --- projects/clang400-import/sys/powerpc/powerpc/db_trace.c (revision 312719) +++ projects/clang400-import/sys/powerpc/powerpc/db_trace.c (revision 312720) @@ -1,310 +1,314 @@ /* $FreeBSD$ */ /* $NetBSD: db_trace.c,v 1.20 2002/05/13 20:30:09 matt Exp $ */ /* $OpenBSD: db_trace.c,v 1.3 1997/03/21 02:10:48 niklas Exp $ */ /*- * Mach Operating System * Copyright (c) 1992 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
* * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static db_varfcn_t db_frame; #define DB_OFFSET(x) (db_expr_t *)offsetof(struct trapframe, x) #ifdef __powerpc64__ #define CALLOFFSET 8 /* Include TOC reload slot */ #else #define CALLOFFSET 4 #endif struct db_variable db_regs[] = { { "r0", DB_OFFSET(fixreg[0]), db_frame }, { "r1", DB_OFFSET(fixreg[1]), db_frame }, { "r2", DB_OFFSET(fixreg[2]), db_frame }, { "r3", DB_OFFSET(fixreg[3]), db_frame }, { "r4", DB_OFFSET(fixreg[4]), db_frame }, { "r5", DB_OFFSET(fixreg[5]), db_frame }, { "r6", DB_OFFSET(fixreg[6]), db_frame }, { "r7", DB_OFFSET(fixreg[7]), db_frame }, { "r8", DB_OFFSET(fixreg[8]), db_frame }, { "r9", DB_OFFSET(fixreg[9]), db_frame }, { "r10", DB_OFFSET(fixreg[10]), db_frame }, { "r11", DB_OFFSET(fixreg[11]), db_frame }, { "r12", DB_OFFSET(fixreg[12]), db_frame }, { "r13", DB_OFFSET(fixreg[13]), db_frame }, { "r14", DB_OFFSET(fixreg[14]), db_frame }, { "r15", DB_OFFSET(fixreg[15]), db_frame }, { "r16", DB_OFFSET(fixreg[16]), db_frame }, { "r17", DB_OFFSET(fixreg[17]), db_frame }, { "r18", DB_OFFSET(fixreg[18]), db_frame }, { "r19", DB_OFFSET(fixreg[19]), db_frame }, { "r20", DB_OFFSET(fixreg[20]), db_frame }, { "r21", DB_OFFSET(fixreg[21]), db_frame }, { "r22", DB_OFFSET(fixreg[22]), db_frame }, { "r23", DB_OFFSET(fixreg[23]), db_frame }, { "r24", DB_OFFSET(fixreg[24]), db_frame }, { "r25", DB_OFFSET(fixreg[25]), db_frame }, { "r26", DB_OFFSET(fixreg[26]), db_frame }, { "r27", DB_OFFSET(fixreg[27]), db_frame }, { "r28", DB_OFFSET(fixreg[28]), db_frame }, { "r29", DB_OFFSET(fixreg[29]), db_frame }, { "r30", DB_OFFSET(fixreg[30]), db_frame }, { "r31", DB_OFFSET(fixreg[31]), db_frame }, { "srr0", DB_OFFSET(srr0), db_frame }, { "srr1", DB_OFFSET(srr1), db_frame }, { "lr", DB_OFFSET(lr), db_frame }, { "ctr", DB_OFFSET(ctr), db_frame }, { "cr", DB_OFFSET(cr), db_frame }, { "xer", DB_OFFSET(xer), db_frame }, { "dar", DB_OFFSET(dar), db_frame }, #ifdef AIM { "dsisr", DB_OFFSET(cpu.aim.dsisr), db_frame }, #endif #if defined(BOOKE) { "esr", DB_OFFSET(cpu.booke.esr), db_frame }, #endif }; struct db_variable *db_eregs = db_regs + nitems(db_regs); /* * register variable handling */ static int db_frame(struct db_variable *vp, db_expr_t *valuep, int op) { register_t *reg; if (kdb_frame == NULL) return (0); reg = (register_t*)((uintptr_t)kdb_frame + (uintptr_t)vp->valuep); if (op == DB_VAR_GET) *valuep = *reg; else *reg = *valuep; return (1); } /* * Frame tracing. 
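 *
 * (Illustrative note on the layout walked below: frame[0] holds the
 * backchain pointer to the caller's frame, the saved LR sits at
 * offset 4 (32-bit) or 16 (64-bit ELFv1, after the backchain and CR
 * save words), and the saved argument words begin at offset 8 or 48
 * respectively, matching the offsets used in db_backtrace().)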
*/ static int db_backtrace(struct thread *td, db_addr_t fp, int count) { db_addr_t stackframe, lr, *args; boolean_t kernel_only = TRUE; boolean_t full = FALSE; #if 0 { register char *cp = modif; register char c; while ((c = *cp++) != 0) { if (c == 't') trace_thread = TRUE; if (c == 'u') kernel_only = FALSE; if (c == 'f') full = TRUE; } } #endif stackframe = fp; while (!db_pager_quit) { if (stackframe < PAGE_SIZE) break; /* * Locate the next frame by grabbing the backchain ptr * from frame[0] */ stackframe = *(db_addr_t *)stackframe; next_frame: #ifdef __powerpc64__ /* The saved arg values start at frame[6] */ args = (db_addr_t *)(stackframe + 48); #else /* The saved arg values start at frame[2] */ args = (db_addr_t *)(stackframe + 8); #endif if (stackframe < PAGE_SIZE) break; if (count-- == 0) break; /* * Extract link register from frame and subtract * 4 to convert into calling address (as opposed to * return address) */ #ifdef __powerpc64__ lr = *(db_addr_t *)(stackframe + 16) - 4; #else lr = *(db_addr_t *)(stackframe + 4) - 4; #endif if ((lr & 3) || (lr < 0x100)) { db_printf("saved LR(0x%zx) is invalid.", lr); break; } #ifdef __powerpc64__ db_printf("0x%016lx: ", stackframe); #else db_printf("0x%08x: ", stackframe); #endif /* * The trap code labels the return addresses from the * call to C code as 'trapexit' and 'asttrapexit. Use this * to determine if the callframe has to traverse a saved * trap context */ if ((lr + CALLOFFSET == (db_addr_t) &trapexit) || (lr + CALLOFFSET == (db_addr_t) &asttrapexit)) { const char *trapstr; struct trapframe *tf = (struct trapframe *)(args); db_printf("%s ", tf->srr1 & PSL_PR ? "user" : "kernel"); switch (tf->exc) { case EXC_DSI: /* XXX take advantage of the union. */ db_printf("DSI %s trap @ %#zx by ", (tf->cpu.aim.dsisr & DSISR_STORE) ? "write" : "read", tf->dar); goto print_trap; case EXC_ALI: /* XXX take advantage of the union. 
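 *
 * (Illustrative note: for DSI and ALI faults the AIM dar/dsisr pair in
 * the per-CPU-family union describes the faulting address and access
 * type, which is why cpu.aim is referenced directly here.)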
*/ db_printf("ALI trap @ %#zx (xSR %#x) ", tf->dar, (uint32_t)tf->cpu.aim.dsisr); goto print_trap; #ifdef __powerpc64__ case EXC_DSE: db_printf("DSE trap @ %#zx by ", tf->dar); goto print_trap; case EXC_ISE: db_printf("ISE trap @ %#zx by ", tf->srr0); goto print_trap; #endif case EXC_ISI: trapstr = "ISI"; break; case EXC_PGM: trapstr = "PGM"; break; case EXC_SC: trapstr = "SC"; break; case EXC_EXI: trapstr = "EXI"; break; case EXC_MCHK: trapstr = "MCHK"; break; #if !defined(BOOKE) case EXC_VEC: trapstr = "VEC"; break; case EXC_FPA: trapstr = "FPA"; break; case EXC_BPT: trapstr = "BPT"; break; case EXC_TRC: trapstr = "TRC"; break; case EXC_RUNMODETRC: trapstr = "RUNMODETRC"; break; case EXC_SMI: trapstr = "SMI"; break; case EXC_RST: trapstr = "RST"; break; #endif case EXC_FPU: trapstr = "FPU"; break; case EXC_DECR: trapstr = "DECR"; break; case EXC_PERF: trapstr = "PERF"; break; case EXC_VSX: trapstr = "VSX"; break; default: trapstr = NULL; break; } if (trapstr != NULL) { db_printf("%s trap by ", trapstr); } else { db_printf("trap %#zx by ", tf->exc); } print_trap: lr = (db_addr_t) tf->srr0; db_printsym(lr, DB_STGY_ANY); db_printf(": srr1=%#zx\n", tf->srr1); db_printf("%-10s r1=%#zx cr=%#x xer=%#x ctr=%#zx", "", tf->fixreg[1], (uint32_t)tf->cr, (uint32_t)tf->xer, tf->ctr); #ifdef __powerpc64__ db_printf(" r2=%#zx", tf->fixreg[2]); #endif if (tf->exc == EXC_DSI) db_printf(" sr=%#x", (uint32_t)tf->cpu.aim.dsisr); db_printf("\n"); stackframe = (db_addr_t) tf->fixreg[1]; if (kernel_only && (tf->srr1 & PSL_PR)) break; goto next_frame; } db_printf("at "); db_printsym(lr, DB_STGY_PROC); if (full) /* Print all the args stored in that stackframe. */ db_printf("(%zx, %zx, %zx, %zx, %zx, %zx, %zx, %zx)", args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7]); db_printf("\n"); } return (0); } void db_trace_self(void) { db_addr_t addr; - addr = (db_addr_t)__builtin_frame_address(1); - db_backtrace(curthread, addr, -1); + addr = (db_addr_t)__builtin_frame_address(0); + if (addr == 0) { + db_printf("Null frame address\n"); + return; + } + db_backtrace(curthread, *(db_addr_t *)addr, -1); } int db_trace_thread(struct thread *td, int count) { struct pcb *ctx; ctx = kdb_thr_ctx(td); return (db_backtrace(td, (db_addr_t)ctx->pcb_sp, count)); } Index: projects/clang400-import/sys/riscv/include/frame.h =================================================================== --- projects/clang400-import/sys/riscv/include/frame.h (revision 312719) +++ projects/clang400-import/sys/riscv/include/frame.h (revision 312720) @@ -1,77 +1,80 @@ /*- * Copyright (c) 2015 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_FRAME_H_ #define _MACHINE_FRAME_H_ #ifndef LOCORE #include #include /* * NOTE: keep this structure in sync with struct reg and struct mcontext. */ struct trapframe { uint64_t tf_ra; uint64_t tf_sp; uint64_t tf_gp; uint64_t tf_tp; uint64_t tf_t[7]; uint64_t tf_s[12]; uint64_t tf_a[8]; uint64_t tf_sepc; uint64_t tf_sstatus; uint64_t tf_sbadaddr; uint64_t tf_scause; }; struct riscv_frame { struct riscv_frame *f_frame; u_long f_retaddr; }; /* * Signal frame. Pushed onto user stack before calling sigcode. */ struct sigframe { siginfo_t sf_si; /* actual saved siginfo */ ucontext_t sf_uc; /* actual saved ucontext */ }; #endif /* !LOCORE */ +/* Definitions for syscalls */ +#define NARGREG 8 /* 8 args in regs */ + #endif /* !_MACHINE_FRAME_H_ */ Index: projects/clang400-import/sys/riscv/riscv/trap.c =================================================================== --- projects/clang400-import/sys/riscv/riscv/trap.c (revision 312719) +++ projects/clang400-import/sys/riscv/riscv/trap.c (revision 312720) @@ -1,386 +1,386 @@ /*- * Copyright (c) 2015-2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifdef KDB #include #endif #include #include #include #include #include #include #include #include #include #include #include #ifdef KDTRACE_HOOKS #include #endif int (*dtrace_invop_jump_addr)(struct trapframe *); extern register_t fsu_intr_fault; /* Called from exception.S */ void do_trap_supervisor(struct trapframe *); void do_trap_user(struct trapframe *); static __inline void call_trapsignal(struct thread *td, int sig, int code, void *addr) { ksiginfo_t ksi; ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = code; ksi.ksi_addr = addr; trapsignal(td, &ksi); } int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) { struct proc *p; register_t *ap; int nap; - nap = 8; + nap = NARGREG; p = td->td_proc; ap = &td->td_frame->tf_a[0]; sa->code = td->td_frame->tf_t[0]; if (sa->code == SYS_syscall || sa->code == SYS___syscall) { sa->code = *ap++; nap--; } if (p->p_sysent->sv_mask) sa->code &= p->p_sysent->sv_mask; if (sa->code >= p->p_sysent->sv_size) sa->callp = &p->p_sysent->sv_table[0]; else sa->callp = &p->p_sysent->sv_table[sa->code]; sa->narg = sa->callp->sy_narg; memcpy(sa->args, ap, nap * sizeof(register_t)); if (sa->narg > nap) - panic("TODO: Could we have more than 8 args?"); + panic("TODO: Could we have more than %d args?", NARGREG); td->td_retval[0] = 0; td->td_retval[1] = 0; return (0); } #include "../../kern/subr_syscall.c" static void dump_regs(struct trapframe *frame) { int n; int i; n = (sizeof(frame->tf_t) / sizeof(frame->tf_t[0])); for (i = 0; i < n; i++) printf("t[%d] == 0x%016lx\n", i, frame->tf_t[i]); n = (sizeof(frame->tf_s) / sizeof(frame->tf_s[0])); for (i = 0; i < n; i++) printf("s[%d] == 0x%016lx\n", i, frame->tf_s[i]); n = (sizeof(frame->tf_a) / sizeof(frame->tf_a[0])); for (i = 0; i < n; i++) printf("a[%d] == 0x%016lx\n", i, frame->tf_a[i]); printf("sepc == 0x%016lx\n", frame->tf_sepc); printf("sstatus == 0x%016lx\n", frame->tf_sstatus); } static void svc_handler(struct trapframe *frame) { struct syscall_args sa; struct thread *td; int error; td = curthread; td->td_frame = frame; error = syscallenter(td, &sa); syscallret(td, error, &sa); } static void data_abort(struct trapframe *frame, int lower) { struct vm_map *map; uint64_t sbadaddr; struct thread *td; struct pcb *pcb; vm_prot_t ftype; vm_offset_t va; struct proc *p; int ucode; int error; int sig; #ifdef KDB if (kdb_active) { kdb_reenter(); return; } #endif td = curthread; pcb = td->td_pcb; /* * Special case for fuswintr and suswintr. These can't sleep so * handle them early on in the trap handler.
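 *
 * (Illustrative note: this is the usual pcb_onfault pattern; a
 * fault-tolerant copy routine stores a recovery address in
 * pcb_onfault before touching user memory, and on a fault the
 * handler redirects sepc to that address instead of calling
 * vm_fault(), as done here and in the onfault check further down.)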
*/ if (__predict_false(pcb->pcb_onfault == (vm_offset_t)&fsu_intr_fault)) { frame->tf_sepc = pcb->pcb_onfault; return; } sbadaddr = frame->tf_sbadaddr; p = td->td_proc; if (lower) map = &td->td_proc->p_vmspace->vm_map; else { /* The top bit tells us which range to use */ if ((sbadaddr >> 63) == 1) map = kernel_map; else map = &td->td_proc->p_vmspace->vm_map; } va = trunc_page(sbadaddr); if (frame->tf_scause == EXCP_FAULT_STORE) { ftype = (VM_PROT_READ | VM_PROT_WRITE); } else { ftype = (VM_PROT_READ); } if (map != kernel_map) { /* * Keep swapout from messing with us during this * critical time. */ PROC_LOCK(p); ++p->p_lock; PROC_UNLOCK(p); /* Fault in the user page: */ error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); PROC_LOCK(p); --p->p_lock; PROC_UNLOCK(p); } else { /* * Don't have to worry about process locking or stacks in the * kernel. */ error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); } if (error != KERN_SUCCESS) { if (lower) { sig = SIGSEGV; if (error == KERN_PROTECTION_FAILURE) ucode = SEGV_ACCERR; else ucode = SEGV_MAPERR; call_trapsignal(td, sig, ucode, (void *)sbadaddr); } else { if (td->td_intr_nesting_level == 0 && pcb->pcb_onfault != 0) { frame->tf_a[0] = error; frame->tf_sepc = pcb->pcb_onfault; return; } dump_regs(frame); panic("vm_fault failed: %lx, va 0x%016lx", frame->tf_sepc, sbadaddr); } } if (lower) userret(td, frame); } void do_trap_supervisor(struct trapframe *frame) { uint64_t exception; uint64_t sstatus; /* Ensure we came from supervisor mode, interrupts disabled */ __asm __volatile("csrr %0, sstatus" : "=&r" (sstatus)); KASSERT((sstatus & (SSTATUS_SPP | SSTATUS_SIE)) == SSTATUS_SPP, ("We must come from S mode with interrupts disabled")); exception = (frame->tf_scause & EXCP_MASK); if (frame->tf_scause & EXCP_INTR) { /* Interrupt */ riscv_cpu_intr(frame); return; } #ifdef KDTRACE_HOOKS if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception)) return; #endif CTR3(KTR_TRAP, "do_trap_supervisor: curthread: %p, sepc: %lx, frame: %p", curthread, frame->tf_sepc, frame); switch(exception) { case EXCP_FAULT_LOAD: case EXCP_FAULT_STORE: case EXCP_FAULT_FETCH: data_abort(frame, 0); break; case EXCP_BREAKPOINT: #ifdef KDTRACE_HOOKS if (dtrace_invop_jump_addr != 0) { dtrace_invop_jump_addr(frame); break; } #endif #ifdef KDB kdb_trap(exception, 0, frame); #else dump_regs(frame); panic("No debugger in kernel.\n"); #endif break; case EXCP_ILLEGAL_INSTRUCTION: dump_regs(frame); panic("Illegal instruction at 0x%016lx\n", frame->tf_sepc); break; default: dump_regs(frame); panic("Unknown kernel exception %x badaddr %lx\n", exception, frame->tf_sbadaddr); } } void do_trap_user(struct trapframe *frame) { uint64_t exception; struct thread *td; uint64_t sstatus; struct pcb *pcb; td = curthread; td->td_frame = frame; pcb = td->td_pcb; /* Ensure we came from usermode, interrupts disabled */ __asm __volatile("csrr %0, sstatus" : "=&r" (sstatus)); KASSERT((sstatus & (SSTATUS_SPP | SSTATUS_SIE)) == 0, ("We must come from U mode with interrupts disabled")); exception = (frame->tf_scause & EXCP_MASK); if (frame->tf_scause & EXCP_INTR) { /* Interrupt */ riscv_cpu_intr(frame); return; } CTR3(KTR_TRAP, "do_trap_user: curthread: %p, sepc: %lx, frame: %p", curthread, frame->tf_sepc, frame); switch(exception) { case EXCP_FAULT_LOAD: case EXCP_FAULT_STORE: case EXCP_FAULT_FETCH: data_abort(frame, 1); break; case EXCP_USER_ECALL: frame->tf_sepc += 4; /* Next instruction */ svc_handler(frame); break; case EXCP_ILLEGAL_INSTRUCTION: #ifdef FPE if ((pcb->pcb_fpflags & PCB_FP_STARTED) ==
0) { /* * May be a FPE trap. Enable FPE usage * for this thread and try again. */ frame->tf_sstatus |= SSTATUS_FS_INITIAL; pcb->pcb_fpflags |= PCB_FP_STARTED; break; } #endif call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)frame->tf_sepc); userret(td, frame); break; case EXCP_BREAKPOINT: call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_sepc); userret(td, frame); break; default: dump_regs(frame); panic("Unknown userland exception %x badaddr %lx\n", exception, frame->tf_sbadaddr); } } Index: projects/clang400-import/sys/sys/eventhandler.h =================================================================== --- projects/clang400-import/sys/sys/eventhandler.h (revision 312719) +++ projects/clang400-import/sys/sys/eventhandler.h (revision 312720) @@ -1,287 +1,294 @@ /*- * Copyright (c) 1999 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_EVENTHANDLER_H_ #define _SYS_EVENTHANDLER_H_ #include #include #include #include struct eventhandler_entry { TAILQ_ENTRY(eventhandler_entry) ee_link; int ee_priority; #define EHE_DEAD_PRIORITY (-1) void *ee_arg; }; #ifdef VIMAGE struct eventhandler_entry_vimage { void (* func)(void); /* Original function registered. */ void *ee_arg; /* Original argument registered. */ void *sparep[2]; }; #endif struct eventhandler_list { char *el_name; int el_flags; #define EHL_INITTED (1<<0) u_int el_runcount; struct mtx el_lock; TAILQ_ENTRY(eventhandler_list) el_link; TAILQ_HEAD(,eventhandler_entry) el_entries; }; typedef struct eventhandler_entry *eventhandler_tag; #define EHL_LOCK(p) mtx_lock(&(p)->el_lock) #define EHL_UNLOCK(p) mtx_unlock(&(p)->el_lock) #define EHL_LOCK_ASSERT(p, x) mtx_assert(&(p)->el_lock, x) /* * Macro to invoke the handlers for a given event. */ #define _EVENTHANDLER_INVOKE(name, list, ...) 
do { \ struct eventhandler_entry *_ep; \ struct eventhandler_entry_ ## name *_t; \ \ KASSERT((list)->el_flags & EHL_INITTED, \ ("eventhandler_invoke: running non-inited list")); \ EHL_LOCK_ASSERT((list), MA_OWNED); \ (list)->el_runcount++; \ KASSERT((list)->el_runcount > 0, \ ("eventhandler_invoke: runcount overflow")); \ CTR0(KTR_EVH, "eventhandler_invoke(\"" __STRING(name) "\")"); \ TAILQ_FOREACH(_ep, &((list)->el_entries), ee_link) { \ if (_ep->ee_priority != EHE_DEAD_PRIORITY) { \ EHL_UNLOCK((list)); \ _t = (struct eventhandler_entry_ ## name *)_ep; \ CTR1(KTR_EVH, "eventhandler_invoke: executing %p", \ (void *)_t->eh_func); \ _t->eh_func(_ep->ee_arg , ## __VA_ARGS__); \ EHL_LOCK((list)); \ } \ } \ KASSERT((list)->el_runcount > 0, \ ("eventhandler_invoke: runcount underflow")); \ (list)->el_runcount--; \ if ((list)->el_runcount == 0) \ eventhandler_prune_list(list); \ EHL_UNLOCK((list)); \ } while (0) /* * Slow handlers are entirely dynamic; lists are created * when entries are added to them, and thus have no concept of "owner", * * Slow handlers need to be declared, but do not need to be defined. The * declaration must be in scope wherever the handler is to be invoked. */ #define EVENTHANDLER_DECLARE(name, type) \ struct eventhandler_entry_ ## name \ { \ struct eventhandler_entry ee; \ type eh_func; \ }; \ struct __hack #define EVENTHANDLER_DEFINE(name, func, arg, priority) \ static eventhandler_tag name ## _tag; \ static void name ## _evh_init(void *ctx) \ { \ name ## _tag = EVENTHANDLER_REGISTER(name, func, ctx, \ priority); \ } \ SYSINIT(name ## _evh_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, \ name ## _evh_init, arg); \ struct __hack #define EVENTHANDLER_INVOKE(name, ...) \ do { \ struct eventhandler_list *_el; \ \ if ((_el = eventhandler_find_list(#name)) != NULL) \ _EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__); \ } while (0) #define EVENTHANDLER_REGISTER(name, func, arg, priority) \ eventhandler_register(NULL, #name, func, arg, priority) #define EVENTHANDLER_DEREGISTER(name, tag) \ do { \ struct eventhandler_list *_el; \ \ if ((_el = eventhandler_find_list(#name)) != NULL) \ eventhandler_deregister(_el, tag); \ } while(0) eventhandler_tag eventhandler_register(struct eventhandler_list *list, const char *name, void *func, void *arg, int priority); void eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag); struct eventhandler_list *eventhandler_find_list(const char *name); void eventhandler_prune_list(struct eventhandler_list *list); #ifdef VIMAGE typedef void (*vimage_iterator_func_t)(void *, ...); eventhandler_tag vimage_eventhandler_register(struct eventhandler_list *list, const char *name, void *func, void *arg, int priority, vimage_iterator_func_t); #endif /* * Standard system event queues. 
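 *
 * A typical consumer attaches to one of the queues declared below,
 * e.g. (illustrative sketch; my_sync_hook and tag are hypothetical):
 *
 *	static void my_sync_hook(void *arg, int howto);
 *	static eventhandler_tag tag;
 *
 *	tag = EVENTHANDLER_REGISTER(shutdown_post_sync, my_sync_hook,
 *	    NULL, SHUTDOWN_PRI_DEFAULT);
 *	...
 *	EVENTHANDLER_DEREGISTER(shutdown_post_sync, tag);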
*/ /* Generic priority levels */ #define EVENTHANDLER_PRI_FIRST 0 #define EVENTHANDLER_PRI_ANY 10000 #define EVENTHANDLER_PRI_LAST 20000 /* Shutdown events */ typedef void (*shutdown_fn)(void *, int); #define SHUTDOWN_PRI_FIRST EVENTHANDLER_PRI_FIRST #define SHUTDOWN_PRI_DEFAULT EVENTHANDLER_PRI_ANY #define SHUTDOWN_PRI_LAST EVENTHANDLER_PRI_LAST EVENTHANDLER_DECLARE(shutdown_pre_sync, shutdown_fn); /* before fs sync */ EVENTHANDLER_DECLARE(shutdown_post_sync, shutdown_fn); /* after fs sync */ EVENTHANDLER_DECLARE(shutdown_final, shutdown_fn); /* Power state change events */ typedef void (*power_change_fn)(void *); EVENTHANDLER_DECLARE(power_resume, power_change_fn); EVENTHANDLER_DECLARE(power_suspend, power_change_fn); EVENTHANDLER_DECLARE(power_suspend_early, power_change_fn); /* Low memory event */ typedef void (*vm_lowmem_handler_t)(void *, int); #define LOWMEM_PRI_DEFAULT EVENTHANDLER_PRI_FIRST EVENTHANDLER_DECLARE(vm_lowmem, vm_lowmem_handler_t); /* Root mounted event */ typedef void (*mountroot_handler_t)(void *); EVENTHANDLER_DECLARE(mountroot, mountroot_handler_t); /* File system mount events */ struct mount; struct vnode; struct thread; typedef void (*vfs_mounted_notify_fn)(void *, struct mount *, struct vnode *, struct thread *); typedef void (*vfs_unmounted_notify_fn)(void *, struct mount *, struct thread *); EVENTHANDLER_DECLARE(vfs_mounted, vfs_mounted_notify_fn); EVENTHANDLER_DECLARE(vfs_unmounted, vfs_unmounted_notify_fn); /* * Process events * process_fork and exit handlers are called without Giant. * exec handlers are called with Giant, but that is by accident. */ struct proc; struct image_params; typedef void (*exitlist_fn)(void *, struct proc *); typedef void (*forklist_fn)(void *, struct proc *, struct proc *, int); typedef void (*execlist_fn)(void *, struct proc *, struct image_params *); typedef void (*proc_ctor_fn)(void *, struct proc *); typedef void (*proc_dtor_fn)(void *, struct proc *); typedef void (*proc_init_fn)(void *, struct proc *); typedef void (*proc_fini_fn)(void *, struct proc *); EVENTHANDLER_DECLARE(process_ctor, proc_ctor_fn); EVENTHANDLER_DECLARE(process_dtor, proc_dtor_fn); EVENTHANDLER_DECLARE(process_init, proc_init_fn); EVENTHANDLER_DECLARE(process_fini, proc_fini_fn); EVENTHANDLER_DECLARE(process_exit, exitlist_fn); EVENTHANDLER_DECLARE(process_fork, forklist_fn); EVENTHANDLER_DECLARE(process_exec, execlist_fn); /* * application dump event */ typedef void (*app_coredump_start_fn)(void *, struct thread *, char *name); typedef void (*app_coredump_progress_fn)(void *, struct thread *td, int byte_count); typedef void (*app_coredump_finish_fn)(void *, struct thread *td); typedef void (*app_coredump_error_fn)(void *, struct thread *td, char *msg, ...); EVENTHANDLER_DECLARE(app_coredump_start, app_coredump_start_fn); EVENTHANDLER_DECLARE(app_coredump_progress, app_coredump_progress_fn); EVENTHANDLER_DECLARE(app_coredump_finish, app_coredump_finish_fn); EVENTHANDLER_DECLARE(app_coredump_error, app_coredump_error_fn); typedef void (*thread_ctor_fn)(void *, struct thread *); typedef void (*thread_dtor_fn)(void *, struct thread *); typedef void (*thread_fini_fn)(void *, struct thread *); typedef void (*thread_init_fn)(void *, struct thread *); EVENTHANDLER_DECLARE(thread_ctor, thread_ctor_fn); EVENTHANDLER_DECLARE(thread_dtor, thread_dtor_fn); EVENTHANDLER_DECLARE(thread_init, thread_init_fn); EVENTHANDLER_DECLARE(thread_fini, thread_fini_fn); typedef void (*uma_zone_chfn)(void *); EVENTHANDLER_DECLARE(nmbclusters_change, uma_zone_chfn); 
EVENTHANDLER_DECLARE(nmbufs_change, uma_zone_chfn); EVENTHANDLER_DECLARE(maxsockets_change, uma_zone_chfn); /* Kernel linker file load and unload events */ struct linker_file; typedef void (*kld_load_fn)(void *, struct linker_file *); typedef void (*kld_unload_fn)(void *, const char *, caddr_t, size_t); typedef void (*kld_unload_try_fn)(void *, struct linker_file *, int *); EVENTHANDLER_DECLARE(kld_load, kld_load_fn); EVENTHANDLER_DECLARE(kld_unload, kld_unload_fn); EVENTHANDLER_DECLARE(kld_unload_try, kld_unload_try_fn); /* Generic graphics framebuffer interface */ struct fb_info; typedef void (*register_framebuffer_fn)(void *, struct fb_info *); typedef void (*unregister_framebuffer_fn)(void *, struct fb_info *); EVENTHANDLER_DECLARE(register_framebuffer, register_framebuffer_fn); EVENTHANDLER_DECLARE(unregister_framebuffer, unregister_framebuffer_fn); /* Veto ada attachment */ struct cam_path; struct ata_params; typedef void (*ada_probe_veto_fn)(void *, struct cam_path *, struct ata_params *, int *); EVENTHANDLER_DECLARE(ada_probe_veto, ada_probe_veto_fn); /* Swap device events */ struct swdevt; typedef void (*swapon_fn)(void *, struct swdevt *); typedef void (*swapoff_fn)(void *, struct swdevt *); EVENTHANDLER_DECLARE(swapon, swapon_fn); EVENTHANDLER_DECLARE(swapoff, swapoff_fn); +/* ifup/ifdown events */ +#define IFNET_EVENT_UP 0 +#define IFNET_EVENT_DOWN 1 +struct ifnet; +typedef void (*ifnet_event_fn)(void *, struct ifnet *ifp, int event); +EVENTHANDLER_DECLARE(ifnet_event, ifnet_event_fn); + #endif /* _SYS_EVENTHANDLER_H_ */ Index: projects/clang400-import/sys/sys/gtaskqueue.h =================================================================== --- projects/clang400-import/sys/sys/gtaskqueue.h (revision 312719) +++ projects/clang400-import/sys/sys/gtaskqueue.h (revision 312720) @@ -1,138 +1,126 @@ /*- * Copyright (c) 2014 Jeffrey Roberson * Copyright (c) 2016 Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_GTASKQUEUE_H_ #define _SYS_GTASKQUEUE_H_ #include #ifndef _KERNEL #error "no user-serviceable parts inside" #endif struct gtaskqueue; typedef void (*gtaskqueue_enqueue_fn)(void *context); /* * Taskqueue groups. Manages dynamic thread groups and irq binding for * device and other tasks. 
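 *
 * Typical usage (illustrative sketch; sc and my_task_fn are
 * hypothetical, and the -1 irq assumes no interrupt binding is
 * wanted): initialize a grouptask, attach it to a group, then
 * enqueue it from an interrupt handler:
 *
 *	static void my_task_fn(void *context);
 *
 *	GROUPTASK_INIT(&sc->sc_task, 0, my_task_fn, sc);
 *	taskqgroup_attach(qgroup_net, &sc->sc_task, sc, -1, "my task");
 *	...
 *	GROUPTASK_ENQUEUE(&sc->sc_task);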
*/ void gtaskqueue_block(struct gtaskqueue *queue); void gtaskqueue_unblock(struct gtaskqueue *queue); int gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask); void gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *task); void gtaskqueue_drain_all(struct gtaskqueue *queue); int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *task); void taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *grptask, void *uniq, int irq, char *name); int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *grptask, void *uniq, int cpu, int irq, char *name); void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask); struct taskqgroup *taskqgroup_create(char *name); void taskqgroup_destroy(struct taskqgroup *qgroup); int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride); #define TASK_ENQUEUED 0x1 #define TASK_SKIP_WAKEUP 0x2 #define GTASK_INIT(task, flags, priority, func, context) do { \ (task)->ta_flags = flags; \ (task)->ta_priority = (priority); \ (task)->ta_func = (func); \ (task)->ta_context = (context); \ } while (0) #define GROUPTASK_INIT(gtask, priority, func, context) \ GTASK_INIT(&(gtask)->gt_task, TASK_SKIP_WAKEUP, priority, func, context) #define GROUPTASK_ENQUEUE(gtask) \ grouptaskqueue_enqueue((gtask)->gt_taskqueue, &(gtask)->gt_task) #define TASKQGROUP_DECLARE(name) \ extern struct taskqgroup *qgroup_##name -#if (!defined(SMP) || defined(EARLY_AP_STARTUP)) +#ifdef EARLY_AP_STARTUP #define TASKQGROUP_DEFINE(name, cnt, stride) \ \ struct taskqgroup *qgroup_##name; \ \ static void \ taskqgroup_define_##name(void *arg) \ { \ qgroup_##name = taskqgroup_create(#name); \ taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \ } \ \ SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \ taskqgroup_define_##name, NULL) -#else /* SMP && !EARLY_AP_STARTUP */ + +#else /* !EARLY_AP_STARTUP */ #define TASKQGROUP_DEFINE(name, cnt, stride) \ \ struct taskqgroup *qgroup_##name; \ \ static void \ taskqgroup_define_##name(void *arg) \ { \ qgroup_##name = taskqgroup_create(#name); \ - /* Adjustment will be null unless smp_cpus == 1. */ \ - /* \ - * XXX this was intended to fix the smp_cpus == 1 case, but \ - * doesn't actually work for that. It gives thes same strange \ - * panic as adjustment at SI_SUB_INIT_IF:SI_ORDER_ANY for a \ - * device that works with a pure UP kernel. \ - */ \ - /* XXX this code is common now, so should not be ifdefed. */ \ - taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \ } \ \ SYSINIT(taskqgroup_##name, SI_SUB_INIT_IF, SI_ORDER_FIRST, \ taskqgroup_define_##name, NULL); \ \ static void \ taskqgroup_adjust_##name(void *arg) \ { \ - /* \ - * Adjustment when smp_cpus > 1 only works accidentally \ - * (when there is no device interrupt before adjustment). \ - */ \ taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \ } \ \ SYSINIT(taskqgroup_adj_##name, SI_SUB_SMP, SI_ORDER_ANY, \ taskqgroup_adjust_##name, NULL); \ -#endif /* !SMP || EARLY_AP_STARTUP */ +#endif /* EARLY_AP_STARTUP */ TASKQGROUP_DECLARE(net); #endif /* !_SYS_GTASKQUEUE_H_ */ Index: projects/clang400-import/sys/sys/libkern.h =================================================================== --- projects/clang400-import/sys/sys/libkern.h (revision 312719) +++ projects/clang400-import/sys/sys/libkern.h (revision 312720) @@ -1,225 +1,252 @@ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)libkern.h 8.1 (Berkeley) 6/10/93 * $FreeBSD$ */ #ifndef _SYS_LIBKERN_H_ #define _SYS_LIBKERN_H_ #include #include #ifdef _KERNEL #include #endif #ifndef LIBKERN_INLINE #define LIBKERN_INLINE static __inline #define LIBKERN_BODY #endif /* BCD conversions. */ extern u_char const bcd2bin_data[]; extern u_char const bin2bcd_data[]; extern char const hex2ascii_data[]; -#define bcd2bin(bcd) (bcd2bin_data[bcd]) -#define bin2bcd(bin) (bin2bcd_data[bin]) -#define hex2ascii(hex) (hex2ascii_data[hex]) +#define LIBKERN_LEN_BCD2BIN 154 +#define LIBKERN_LEN_BIN2BCD 100 +#define LIBKERN_LEN_HEX2ASCII 36 + +static inline u_char +bcd2bin(int bcd) +{ + + KASSERT(bcd >= 0 && bcd < LIBKERN_LEN_BCD2BIN, + ("invalid bcd %d", bcd)); + return (bcd2bin_data[bcd]); +} + +static inline u_char +bin2bcd(int bin) +{ + + KASSERT(bin >= 0 && bin < LIBKERN_LEN_BIN2BCD, + ("invalid bin %d", bin)); + return (bin2bcd_data[bin]); +} + +static inline char +hex2ascii(int hex) +{ + + KASSERT(hex >= 0 && hex < LIBKERN_LEN_HEX2ASCII, + ("invalid hex %d", hex)); + return (hex2ascii_data[hex]); +} static __inline int imax(int a, int b) { return (a > b ? a : b); } static __inline int imin(int a, int b) { return (a < b ? a : b); } static __inline long lmax(long a, long b) { return (a > b ? a : b); } static __inline long lmin(long a, long b) { return (a < b ? a : b); } static __inline u_int max(u_int a, u_int b) { return (a > b ? a : b); } static __inline u_int min(u_int a, u_int b) { return (a < b ? a : b); } static __inline quad_t qmax(quad_t a, quad_t b) { return (a > b ? a : b); } static __inline quad_t qmin(quad_t a, quad_t b) { return (a < b ? a : b); } static __inline u_quad_t uqmax(u_quad_t a, u_quad_t b) { return (a > b ? a : b); } static __inline u_quad_t uqmin(u_quad_t a, u_quad_t b) { return (a < b ? a : b); } static __inline u_long ulmax(u_long a, u_long b) { return (a > b ? a : b); } static __inline u_long ulmin(u_long a, u_long b) { return (a < b ? a : b); } static __inline __uintmax_t ummax(__uintmax_t a, __uintmax_t b) { return (a > b ? 
a : b); } static __inline __uintmax_t ummin(__uintmax_t a, __uintmax_t b) { return (a < b ? a : b); } static __inline off_t omax(off_t a, off_t b) { return (a > b ? a : b); } static __inline off_t omin(off_t a, off_t b) { return (a < b ? a : b); } static __inline int abs(int a) { return (a < 0 ? -a : a); } static __inline long labs(long a) { return (a < 0 ? -a : a); } static __inline quad_t qabs(quad_t a) { return (a < 0 ? -a : a); } #define ARC4_ENTR_NONE 0 /* Don't have entropy yet. */ #define ARC4_ENTR_HAVE 1 /* Have entropy. */ #define ARC4_ENTR_SEED 2 /* Reseeding. */ extern int arc4rand_iniseed_state; /* Prototypes for non-quad routines. */ struct malloc_type; uint32_t arc4random(void); void arc4rand(void *ptr, u_int len, int reseed); int bcmp(const void *, const void *, size_t); int timingsafe_bcmp(const void *, const void *, size_t); void *bsearch(const void *, const void *, size_t, size_t, int (*)(const void *, const void *)); #ifndef HAVE_INLINE_FFS int ffs(int); #endif #ifndef HAVE_INLINE_FFSL int ffsl(long); #endif #ifndef HAVE_INLINE_FFSLL int ffsll(long long); #endif #ifndef HAVE_INLINE_FLS int fls(int); #endif #ifndef HAVE_INLINE_FLSL int flsl(long); #endif #ifndef HAVE_INLINE_FLSLL int flsll(long long); #endif #define bitcount64(x) __bitcount64((uint64_t)(x)) #define bitcount32(x) __bitcount32((uint32_t)(x)) #define bitcount16(x) __bitcount16((uint16_t)(x)) #define bitcountl(x) __bitcountl((u_long)(x)) #define bitcount(x) __bitcount((u_int)(x)) int fnmatch(const char *, const char *, int); int locc(int, char *, u_int); void *memchr(const void *s, int c, size_t n); void *memcchr(const void *s, int c, size_t n); int memcmp(const void *b1, const void *b2, size_t len); void *memmem(const void *l, size_t l_len, const void *s, size_t s_len); void qsort(void *base, size_t nmemb, size_t size, int (*compar)(const void *, const void *)); void qsort_r(void *base, size_t nmemb, size_t size, void *thunk, int (*compar)(void *, const void *, const void *)); u_long random(void); int scanc(u_int, const u_char *, const u_char *, int); void srandom(u_long); int strcasecmp(const char *, const char *); char *strcat(char * __restrict, const char * __restrict); char *strchr(const char *, int); int strcmp(const char *, const char *); char *strcpy(char * __restrict, const char * __restrict); size_t strcspn(const char * __restrict, const char * __restrict) __pure; char *strdup(const char *__restrict, struct malloc_type *); char *strncat(char *, const char *, size_t); char *strndup(const char *__restrict, size_t, struct malloc_type *); size_t strlcat(char *, const char *, size_t); size_t strlcpy(char *, const char *, size_t); size_t strlen(const char *); int strncasecmp(const char *, const char *, size_t); int strncmp(const char *, const char *, size_t); char *strncpy(char * __restrict, const char * __restrict, size_t); size_t strnlen(const char *, size_t); char *strrchr(const char *, int); char *strsep(char **, const char *delim); size_t strspn(const char *, const char *); char *strstr(const char *, const char *); int strvalid(const char *, size_t); extern const uint32_t crc32_tab[]; static __inline uint32_t crc32_raw(const void *buf, size_t size, uint32_t crc) { const uint8_t *p = (const uint8_t *)buf; while (size--) crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8); return (crc); } static __inline uint32_t crc32(const void *buf, size_t size) { uint32_t crc; crc = crc32_raw(buf, size, ~0U); return (crc ^ ~0U); } uint32_t calculate_crc32c(uint32_t crc32c, const unsigned char *buffer, unsigned int 
length); LIBKERN_INLINE void *memset(void *, int, size_t); #ifdef LIBKERN_BODY LIBKERN_INLINE void * memset(void *b, int c, size_t len) { char *bb; if (c == 0) bzero(b, len); else for (bb = (char *)b; len--; ) *bb++ = c; return (b); } #endif static __inline char * index(const char *p, int ch) { return (strchr(p, ch)); } static __inline char * rindex(const char *p, int ch) { return (strrchr(p, ch)); } /* fnmatch() return values. */ #define FNM_NOMATCH 1 /* Match failed. */ /* fnmatch() flags. */ #define FNM_NOESCAPE 0x01 /* Disable backslash escaping. */ #define FNM_PATHNAME 0x02 /* Slash must be matched by slash. */ #define FNM_PERIOD 0x04 /* Period must be matched by period. */ #define FNM_LEADING_DIR 0x08 /* Ignore / after Imatch. */ #define FNM_CASEFOLD 0x10 /* Case insensitive search. */ #define FNM_IGNORECASE FNM_CASEFOLD #define FNM_FILE_NAME FNM_PATHNAME #endif /* !_SYS_LIBKERN_H_ */ Index: projects/clang400-import/usr.bin/find/find.1 =================================================================== --- projects/clang400-import/usr.bin/find/find.1 (revision 312719) +++ projects/clang400-import/usr.bin/find/find.1 (revision 312720) @@ -1,1119 +1,1120 @@ .\" Copyright (c) 1990, 1993 .\" The Regents of the University of California. All rights reserved. .\" .\" This code is derived from software contributed to Berkeley by .\" the Institute of Electrical and Electronics Engineers, Inc. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 4. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)find.1 8.7 (Berkeley) 5/9/95 .\" $FreeBSD$ .\" -.Dd April 13, 2014 +.Dd January 24, 2017 .Dt FIND 1 .Os .Sh NAME .Nm find .Nd walk a file hierarchy .Sh SYNOPSIS .Nm .Op Fl H | Fl L | Fl P .Op Fl EXdsx .Op Fl f Ar path .Ar path ... .Op Ar expression .Nm .Op Fl H | Fl L | Fl P .Op Fl EXdsx .Fl f Ar path .Op Ar path ... .Op Ar expression .Sh DESCRIPTION The .Nm utility recursively descends the directory tree for each .Ar path listed, evaluating an .Ar expression (composed of the .Dq primaries and .Dq operands listed below) in terms of each file in the tree. 
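.Pp
For example, the following invocation combines several of the primaries
described below to delete regular files matching
.Ql *.core
that were last modified more than a week ago:
.Pp
.Dl find /tmp -name '*.core' -type f -mtime +7 -delete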
.Pp The options are as follows: .Bl -tag -width indent .It Fl E Interpret regular expressions followed by .Ic -regex and .Ic -iregex primaries as extended (modern) regular expressions rather than basic regular expressions (BRE's). The .Xr re_format 7 manual page fully describes both formats. .It Fl H Cause the file information and file type (see .Xr stat 2 ) returned for each symbolic link specified on the command line to be those of the file referenced by the link, not the link itself. If the referenced file does not exist, the file information and type will be for the link itself. File information of all symbolic links not on the command line is that of the link itself. .It Fl L Cause the file information and file type (see .Xr stat 2 ) returned for each symbolic link to be those of the file referenced by the link, not the link itself. If the referenced file does not exist, the file information and type will be for the link itself. .Pp This option is equivalent to the deprecated .Ic -follow primary. .It Fl P Cause the file information and file type (see .Xr stat 2 ) returned for each symbolic link to be those of the link itself. This is the default. .It Fl X Permit .Nm to be safely used in conjunction with .Xr xargs 1 . If a file name contains any of the delimiting characters used by .Xr xargs 1 , a diagnostic message is displayed on standard error, and the file is skipped. The delimiting characters include single .Pq Dq Li " ' " and double .Pq Dq Li " \*q " quotes, backslash .Pq Dq Li \e , space, tab and newline characters. .Pp However, you may wish to consider the .Fl print0 primary in conjunction with .Dq Nm xargs Fl 0 as an effective alternative. .It Fl d Cause .Nm to perform a depth-first traversal. .Pp This option is a BSD-specific equivalent of the .Ic -depth primary specified by .St -p1003.1-2001 . Refer to its description under .Sx PRIMARIES for more information. .It Fl s Cause .Nm to traverse the file hierarchies in lexicographical order, i.e., alphabetical order within each directory. Note: .Ql find -s and .Ql "find | sort" may give different results. .It Fl x Prevent .Nm from descending into directories that have a device number different than that of the file from which the descent began. .Pp This option is equivalent to the deprecated .Ic -xdev primary. .El .Sh PRIMARIES All primaries which take a numeric argument allow the number to be preceded by a plus sign .Pq Dq Li + or a minus sign .Pq Dq Li - . A preceding plus sign means .Dq more than n , a preceding minus sign means .Dq less than n and neither means .Dq exactly n . .Bl -tag -width indent .It Ic -Bmin Ar n True if the difference between the time of a file's inode creation and the time .Nm was started, rounded up to the next full minute, is .Ar n minutes. .It Ic -Bnewer Ar file Same as .Ic -newerBm . .It Ic -Btime Ar n Ns Op Cm smhdw If no units are specified, this primary evaluates to true if the difference between the time of a file's inode creation and the time .Nm was started, rounded up to the next full 24-hour period, is .Ar n 24-hour periods. .Pp If units are specified, this primary evaluates to true if the difference between the time of a file's inode creation and the time .Nm was started is exactly .Ar n units. Please refer to the .Ic -atime primary description for information on supported time units. .It Ic -acl May be used in conjunction with other primaries to locate files with extended ACLs. See .Xr acl 3 for more information. 
.It Ic -amin Oo Cm - Ns | Ns Cm + Oc Ns Ar n True if the difference between the file last access time and the time .Nm was started, rounded up to the next full minute, is more than .Ar n .Pq + Ns Ar n , less than .Ar n .Pq - Ns Ar n , or exactly .Ar n minutes ago. .It Ic -anewer Ar file Same as .Ic -neweram . .It Ic -atime Ar n Ns Op Cm smhdw If no units are specified, this primary evaluates to true if the difference between the file last access time and the time .Nm was started, rounded up to the next full 24-hour period, is .Ar n 24-hour periods. .Pp If units are specified, this primary evaluates to true if the difference between the file last access time and the time .Nm was started is exactly .Ar n units. Possible time units are as follows: .Pp .Bl -tag -width indent -compact .It Cm s second .It Cm m minute (60 seconds) .It Cm h hour (60 minutes) .It Cm d day (24 hours) .It Cm w week (7 days) .El .Pp Any number of units may be combined in one .Ic -atime argument, for example, .Dq Li "-atime -1h30m" . Units are probably only useful when used in conjunction with the .Cm + or .Cm - modifier. .It Ic -cmin Oo Cm - Ns | Ns Cm + Oc Ns Ar n True if the difference between the time of last change of file status information and the time .Nm was started, rounded up to the next full minute, is more than .Ar n .Pq + Ns Ar n , less than .Ar n .Pq - Ns Ar n , or exactly .Ar n minutes ago. .It Ic -cnewer Ar file Same as .Ic -newercm . .It Ic -ctime Ar n Ns Op Cm smhdw If no units are specified, this primary evaluates to true if the difference between the time of last change of file status information and the time .Nm was started, rounded up to the next full 24-hour period, is .Ar n 24-hour periods. .Pp If units are specified, this primary evaluates to true if the difference between the time of last change of file status information and the time .Nm was started is exactly .Ar n units. Please refer to the .Ic -atime primary description for information on supported time units. .It Ic -d Non-portable, BSD-specific version of .Ic depth . GNU find implements this as a primary in mistaken emulation of .Fx .Nm . .It Ic -delete Delete found files and/or directories. Always returns true. This executes from the current working directory as .Nm recurses down the tree. It will not attempt to delete a filename with a .Dq Pa / character in its pathname relative to .Dq Pa \&. for security reasons. Depth-first traversal processing is implied by this option. The .Ic -delete primary will fail to delete a directory if it is not empty. Following symlinks is incompatible with this option. .It Ic -depth Always true; same as the non-portable .Fl d option. Cause .Nm to perform a depth-first traversal, i.e., directories are visited in post-order and all entries in a directory will be acted on before the directory itself. By default, .Nm visits directories in pre-order, i.e., before their contents. Note, the default is .Em not a breadth-first traversal. .Pp The .Ic -depth primary can be useful when .Nm is used with .Xr cpio 1 to process files that are contained in directories with unusual permissions. It ensures that you have write permission while you are placing files in a directory, then sets the directory's permissions as the last thing. .It Ic -depth Ar n True if the depth of the file relative to the starting point of the traversal is .Ar n . .It Ic -empty True if the current file or directory is empty. .It Ic -exec Ar utility Oo Ar argument ... 
Oc Li \&; True if the program named .Ar utility returns a zero value as its exit status. Optional .Ar arguments may be passed to the utility. The expression must be terminated by a semicolon .Pq Dq Li \&; . If you invoke .Nm from a shell you may need to quote the semicolon if the shell would otherwise treat it as a control operator. If the string .Dq Li {} appears anywhere in the utility name or the arguments it is replaced by the pathname of the current file. .Ar Utility will be executed from the directory from which .Nm was executed. .Ar Utility and .Ar arguments are not subject to the further expansion of shell patterns and constructs. .It Ic -exec Ar utility Oo Ar argument ... Oc Li {} + Same as .Ic -exec , except that .Dq Li {} is replaced with as many pathnames as possible for each invocation of .Ar utility . This behaviour is similar to that of .Xr xargs 1 . The primary always returns true; if at least one invocation of .Ar utility returns a non-zero exit status, .Nm will return a non-zero exit status. .It Ic -execdir Ar utility Oo Ar argument ... Oc Li \&; The .Ic -execdir primary is identical to the .Ic -exec primary with the exception that .Ar utility will be executed from the directory that holds the current file. The filename substituted for the string .Dq Li {} is not qualified. .It Ic -execdir Ar utility Oo Ar argument ... Oc Li {} + Same as .Ic -execdir , except that .Dq Li {} is replaced with as many pathnames as possible for each invocation of .Ar utility . This behaviour is similar to that of .Xr xargs 1 . The primary always returns true; if at least one invocation of .Ar utility returns a non-zero exit status, .Nm will return a non-zero exit status. .It Ic -flags Oo Cm - Ns | Ns Cm + Oc Ns Ar flags , Ns Ar notflags The flags are specified using symbolic names (see .Xr chflags 1 ) . Those with the .Qq Li no prefix (except .Qq Li nodump ) are said to be .Ar notflags . Flags in .Ar flags are checked to be set, and flags in .Ar notflags are checked to be not set. Note that this is different from .Ic -perm , which only allows the user to specify mode bits that are set. .Pp If flags are preceded by a dash .Pq Dq Li - , this primary evaluates to true if at least all of the bits in .Ar flags and none of the bits in .Ar notflags are set in the file's flags bits. If flags are preceded by a plus .Pq Dq Li + , this primary evaluates to true if any of the bits in .Ar flags is set in the file's flags bits, or any of the bits in .Ar notflags is not set in the file's flags bits. Otherwise, this primary evaluates to true if the bits in .Ar flags exactly match the file's flags bits, and none of the .Ar flags bits match those of .Ar notflags . .It Ic -fstype Ar type True if the file is contained in a file system of type .Ar type . The .Xr lsvfs 1 command can be used to find out the types of file systems that are available on the system. In addition, there are two pseudo-types, .Dq Li local and .Dq Li rdonly . The former matches any file system physically mounted on the system where the .Nm is being executed and the latter matches any file system which is mounted read-only. .It Ic -gid Ar gname The same thing as .Ar -group Ar gname for compatibility with GNU find. GNU find imposes a restriction that .Ar gname is numeric, while .Nm does not. .It Ic -group Ar gname True if the file belongs to the group .Ar gname . If .Ar gname is numeric and there is no such group name, then .Ar gname is treated as a group ID. 
.It Ic -ignore_readdir_race Ignore errors because a file or a directory is deleted after reading the name from a directory. This option does not affect errors occurring on starting points. .It Ic -ilname Ar pattern Like .Ic -lname , but the match is case insensitive. This is a GNU find extension. .It Ic -iname Ar pattern Like .Ic -name , but the match is case insensitive. .It Ic -inum Ar n True if the file has inode number .Ar n . .It Ic -ipath Ar pattern Like .Ic -path , but the match is case insensitive. .It Ic -iregex Ar pattern Like .Ic -regex , but the match is case insensitive. .It Ic -iwholename Ar pattern The same thing as .Ic -ipath , for GNU find compatibility. .It Ic -links Ar n True if the file has .Ar n links. .It Ic -lname Ar pattern Like .Ic -name , but the contents of the symbolic link are matched instead of the file name. Note that this only matches broken symbolic links if symbolic links are being followed. This is a GNU find extension. .It Ic -ls This primary always evaluates to true. The following information for the current file is written to standard output: its inode number, size in 512-byte blocks, file permissions, number of hard links, owner, group, size in bytes, last modification time, and pathname. If the file is a block or character special file, the device number will be displayed instead of the size in bytes. If the file is a symbolic link, the pathname of the linked-to file will be displayed preceded by .Dq Li -> . The format is identical to that produced by .Bk -words .Dq Nm ls Fl dgils . .Ek .It Ic -maxdepth Ar n Always true; descend at most .Ar n directory levels below the command line arguments. If any .Ic -maxdepth primary is specified, it applies to the entire expression even if it would not normally be evaluated. .Dq Ic -maxdepth Li 0 limits the whole search to the command line arguments. .It Ic -mindepth Ar n Always true; do not apply any tests or actions at levels less than .Ar n . If any .Ic -mindepth primary is specified, it applies to the entire expression even if it would not normally be evaluated. .Dq Ic -mindepth Li 1 processes all but the command line arguments. .It Ic -mmin Oo Cm - Ns | Ns Cm + Oc Ns Ar n True if the difference between the file last modification time and the time .Nm was started, rounded up to the next full minute, is +more than .Ar n .Pq + Ns Ar n , less than .Ar n .Pq - Ns Ar n , or exactly .Ar n minutes ago. .It Ic -mnewer Ar file Same as .Ic -newer . .It Ic -mount The same thing as .Ic -xdev , for GNU find compatibility. .It Ic -mtime Ar n Ns Op Cm smhdw If no units are specified, this primary evaluates to true if the difference between the file last modification time and the time .Nm was started, rounded up to the next full 24-hour period, is .Ar n 24-hour periods. .Pp If units are specified, this primary evaluates to true if the difference between the file last modification time and the time .Nm was started is exactly .Ar n units. Please refer to the .Ic -atime primary description for information on supported time units. .It Ic -name Ar pattern True if the last component of the pathname being examined matches .Ar pattern . Special shell pattern matching characters .Dq ( Li \&[ , .Dq Li \&] , .Dq Li * , and .Dq Li \&? ) may be used as part of .Ar pattern . These characters may be matched explicitly by escaping them with a backslash .Pq Dq Li \e . .It Ic -newer Ar file True if the current file has a more recent last modification time than .Ar file . 
.It Ic -newer Ns Ar X Ns Ar Y Ar file True if the current file has a more recent last access time .Pq Ar X Ns = Ns Cm a , inode creation time .Pq Ar X Ns = Ns Cm B , change time .Pq Ar X Ns = Ns Cm c , or modification time .Pq Ar X Ns = Ns Cm m than the last access time .Pq Ar Y Ns = Ns Cm a , inode creation time .Pq Ar Y Ns = Ns Cm B , change time .Pq Ar Y Ns = Ns Cm c , or modification time .Pq Ar Y Ns = Ns Cm m of .Ar file . In addition, if .Ar Y Ns = Ns Cm t , then .Ar file is instead interpreted as a direct date specification of the form understood by .Xr cvs 1 . Note that .Ic -newermm is equivalent to .Ic -newer . .It Ic -nogroup True if the file belongs to an unknown group. .It Ic -noignore_readdir_race Turn off the effect of .Ic -ignore_readdir_race . This is default behaviour. .It Ic -noleaf This option is for GNU find compatibility. In GNU find it disables an optimization not relevant to .Nm , so it is ignored. .It Ic -nouser True if the file belongs to an unknown user. .It Ic -ok Ar utility Oo Ar argument ... Oc Li \&; The .Ic -ok primary is identical to the .Ic -exec primary with the exception that .Nm requests user affirmation for the execution of the .Ar utility by printing a message to the terminal and reading a response. If the response is not affirmative .Ql ( y in the .Dq Li POSIX locale), the command is not executed and the value of the .Ic -ok expression is false. .It Ic -okdir Ar utility Oo Ar argument ... Oc Li \&; The .Ic -okdir primary is identical to the .Ic -execdir primary with the same exception as described for the .Ic -ok primary. .It Ic -path Ar pattern True if the pathname being examined matches .Ar pattern . Special shell pattern matching characters .Dq ( Li \&[ , .Dq Li \&] , .Dq Li * , and .Dq Li \&? ) may be used as part of .Ar pattern . These characters may be matched explicitly by escaping them with a backslash .Pq Dq Li \e . Slashes .Pq Dq Li / are treated as normal characters and do not have to be matched explicitly. .It Ic -perm Oo Cm - Ns | Ns Cm + Oc Ns Ar mode The .Ar mode may be either symbolic (see .Xr chmod 1 ) or an octal number. If the .Ar mode is symbolic, a starting value of zero is assumed and the .Ar mode sets or clears permissions without regard to the process' file mode creation mask. If the .Ar mode is octal, only bits 07777 .Pq Dv S_ISUID | S_ISGID | S_ISTXT | S_IRWXU | S_IRWXG | S_IRWXO of the file's mode bits participate in the comparison. If the .Ar mode is preceded by a dash .Pq Dq Li - , this primary evaluates to true if at least all of the bits in the .Ar mode are set in the file's mode bits. If the .Ar mode is preceded by a plus .Pq Dq Li + , this primary evaluates to true if any of the bits in the .Ar mode are set in the file's mode bits. Otherwise, this primary evaluates to true if the bits in the .Ar mode exactly match the file's mode bits. Note, the first character of a symbolic mode may not be a dash .Pq Dq Li - . .It Ic -print This primary always evaluates to true. It prints the pathname of the current file to standard output. If none of .Ic -exec , -ls , -print0 , or .Ic -ok is specified, the given expression shall be effectively replaced by .Cm \&( Ar "given expression" Cm \&) Ic -print . .It Ic -print0 This primary always evaluates to true. It prints the pathname of the current file to standard output, followed by an .Tn ASCII .Dv NUL character (character code 0). .It Ic -prune This primary always evaluates to true. It causes .Nm to not descend into the current file. 
Note, the .Ic -prune primary has no effect if the .Fl d option was specified. .It Ic -quit Causes .Nm to terminate immediately. .It Ic -regex Ar pattern True if the whole path of the file matches .Ar pattern using regular expression. To match a file named .Dq Pa ./foo/xyzzy , you can use the regular expression .Dq Li ".*/[xyz]*" or .Dq Li ".*/foo/.*" , but not .Dq Li xyzzy or .Dq Li /foo/ . .It Ic -samefile Ar name True if the file is a hard link to .Ar name . If the command option .Ic -L is specified, it is also true if the file is a symbolic link and points to .Ar name . .It Ic -size Ar n Ns Op Cm ckMGTP True if the file's size, rounded up, in 512-byte blocks is .Ar n . If .Ar n is followed by a .Cm c , then the primary is true if the file's size is .Ar n bytes (characters). Similarly if .Ar n is followed by a scale indicator then the file's size is compared to .Ar n scaled as: .Pp .Bl -tag -width indent -compact .It Cm k kilobytes (1024 bytes) .It Cm M megabytes (1024 kilobytes) .It Cm G gigabytes (1024 megabytes) .It Cm T terabytes (1024 gigabytes) .It Cm P petabytes (1024 terabytes) .El .It Ic -sparse True if the current file is sparse, i.e. has fewer blocks allocated than expected based on its size in bytes. This might also match files that have been compressed by the filesystem. .It Ic -type Ar t True if the file is of the specified type. Possible file types are as follows: .Pp .Bl -tag -width indent -compact .It Cm b block special .It Cm c character special .It Cm d directory .It Cm f regular file .It Cm l symbolic link .It Cm p FIFO .It Cm s socket .El .It Ic -uid Ar uname The same thing as .Ar -user Ar uname for compatibility with GNU find. GNU find imposes a restriction that .Ar uname is numeric, while .Nm does not. .It Ic -user Ar uname True if the file belongs to the user .Ar uname . If .Ar uname is numeric and there is no such user name, then .Ar uname is treated as a user ID. .It Ic -wholename Ar pattern The same thing as .Ic -path , for GNU find compatibility. .El .Sh OPERATORS The primaries may be combined using the following operators. The operators are listed in order of decreasing precedence. .Pp .Bl -tag -width indent -compact .It Cm \&( Ar expression Cm \&) This evaluates to true if the parenthesized expression evaluates to true. .Pp .It Cm \&! Ar expression .It Cm -not Ar expression This is the unary .Tn NOT operator. It evaluates to true if the expression is false. .Pp .It Cm -false Always false. .It Cm -true Always true. .Pp .It Ar expression Cm -and Ar expression .It Ar expression expression The .Cm -and operator is the logical .Tn AND operator. As it is implied by the juxtaposition of two expressions it does not have to be specified. The expression evaluates to true if both expressions are true. The second expression is not evaluated if the first expression is false. .Pp .It Ar expression Cm -or Ar expression The .Cm -or operator is the logical .Tn OR operator. The expression evaluates to true if either the first or the second expression is true. The second expression is not evaluated if the first expression is true. .El .Pp All operands and primaries must be separate arguments to .Nm . Primaries which themselves take arguments expect each argument to be a separate argument to .Nm . .Sh ENVIRONMENT The .Ev LANG , LC_ALL , LC_COLLATE , LC_CTYPE , LC_MESSAGES and .Ev LC_TIME environment variables affect the execution of the .Nm utility as described in .Xr environ 7 . 
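The -size scale indicators described earlier multiply by 1024 at each step, while a plain n counts 512-byte blocks. A minimal C sketch of that conversion follows; scale_size is our name, not find's, and a 64-bit off_t is assumed so the T and P cases do not overflow.

#include <sys/types.h>

/*
 * Convert a -size operand to bytes.  Each suffix is 1024 times the
 * previous one, so the cases deliberately fall through.
 */
static off_t
scale_size(off_t n, char suffix)
{
	off_t mult = 1;

	switch (suffix) {
	case 'P': mult *= 1024;	/* FALLTHROUGH */
	case 'T': mult *= 1024;	/* FALLTHROUGH */
	case 'G': mult *= 1024;	/* FALLTHROUGH */
	case 'M': mult *= 1024;	/* FALLTHROUGH */
	case 'k': mult *= 1024;	/* FALLTHROUGH */
	case 'c': break;	/* plain bytes */
	default:		/* no suffix: n counts 512-byte blocks;
				   find rounds the file's size up to
				   whole blocks before comparing */
		return (n * 512);
	}
	return (n * mult);
}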
.Sh EXAMPLES The following examples are shown as given to the shell: .Bl -tag -width indent .It Li "find / \e! -name \*q*.c\*q -print" Print out a list of all the files whose names do not end in .Pa .c . .It Li "find / -newer ttt -user wnj -print" Print out a list of all the files owned by user .Dq wnj that are newer than the file .Pa ttt . .It Li "find / \e! \e( -newer ttt -user wnj \e) -print" Print out a list of all the files which are not both newer than .Pa ttt and owned by .Dq wnj . .It Li "find / \e( -newer ttt -or -user wnj \e) -print" Print out a list of all the files that are either owned by .Dq wnj or that are newer than .Pa ttt . .It Li "find / -newerct '1 minute ago' -print" Print out a list of all the files whose inode change time is more recent than the current time minus one minute. .It Li "find / -type f -exec echo {} \e;" Use the .Xr echo 1 command to print out a list of all the files. .It Li "find -L /usr/ports/packages -type l -exec rm -- {} +" Delete all broken symbolic links in .Pa /usr/ports/packages . .It Li "find /usr/src -name CVS -prune -o -depth +6 -print" Find files and directories that are at least seven levels deep in the working directory .Pa /usr/src . .It Li "find /usr/src -name CVS -prune -o -mindepth 7 -print" Is not equivalent to the previous example, since .Ic -prune is not evaluated below level seven. .El .Sh COMPATIBILITY The .Ic -follow primary is deprecated; the .Fl L option should be used instead. See the .Sx STANDARDS section below for details. .Sh SEE ALSO .Xr chflags 1 , .Xr chmod 1 , .Xr cvs 1 , .Xr locate 1 , .Xr lsvfs 1 , .Xr whereis 1 , .Xr which 1 , .Xr xargs 1 , .Xr stat 2 , .Xr acl 3 , .Xr fts 3 , .Xr getgrent 3 , .Xr getpwent 3 , .Xr strmode 3 , .Xr re_format 7 , .Xr symlink 7 .Sh STANDARDS The .Nm utility syntax is a superset of the syntax specified by the .St -p1003.1-2001 standard. .Pp All the single character options except .Fl H and .Fl L as well as .Ic -amin , -anewer , -cmin , -cnewer , -delete , -empty , -fstype , .Ic -iname , -inum , -iregex , -ls , -maxdepth , -mindepth , -mmin , .Ic -path , -print0 , -regex , -sparse and all of the .Ic -B* birthtime related primaries are extensions to .St -p1003.1-2001 . .Pp Historically, the .Fl d , L and .Fl x options were implemented using the primaries .Ic -depth , -follow , and .Ic -xdev . These primaries always evaluated to true. As they were really global variables that took effect before the traversal began, some legal expressions could have unexpected results. An example is the expression .Ic -print Cm -o Ic -depth . As .Ic -print always evaluates to true, the standard order of evaluation implies that .Ic -depth would never be evaluated. This is not the case. .Pp The operator .Cm -or was implemented as .Cm -o , and the operator .Cm -and was implemented as .Cm -a . .Pp Historic implementations of the .Ic -exec and .Ic -ok primaries did not replace the string .Dq Li {} in the utility name or the utility arguments if it had preceding or following non-whitespace characters. This version replaces it no matter where in the utility name or arguments it appears. .Pp The .Fl E option was inspired by the equivalent .Xr grep 1 and .Xr sed 1 options. .Sh HISTORY A .Nm command appeared in .At v1 . .Sh BUGS The special characters used by .Nm are also special characters to many shell programs. In particular, the characters .Dq Li * , .Dq Li \&[ , .Dq Li \&] , .Dq Li \&? , .Dq Li \&( , .Dq Li \&) , .Dq Li \&! , .Dq Li \e and .Dq Li \&; may have to be escaped from the shell. 
.Pp As there is no delimiter separating options and file names or file names and the .Ar expression , it is difficult to specify files named .Pa -xdev or .Pa \&! . These problems are handled by the .Fl f option and the .Xr getopt 3 .Dq Fl Fl construct. .Pp The .Ic -delete primary does not interact well with other options that change how the file system tree is traversed. .Pp The .Ic -mindepth and .Ic -maxdepth primaries are actually global options (as documented above). They should probably be replaced by options which look like options. Index: projects/clang400-import/usr.bin/iscsictl/iscsictl.c =================================================================== --- projects/clang400-import/usr.bin/iscsictl/iscsictl.c (revision 312719) +++ projects/clang400-import/usr.bin/iscsictl/iscsictl.c (revision 312720) @@ -1,1035 +1,1035 @@ /*- * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iscsictl.h" struct conf * conf_new(void) { struct conf *conf; conf = calloc(1, sizeof(*conf)); if (conf == NULL) xo_err(1, "calloc"); TAILQ_INIT(&conf->conf_targets); return (conf); } struct target * target_find(struct conf *conf, const char *nickname) { struct target *targ; TAILQ_FOREACH(targ, &conf->conf_targets, t_next) { if (targ->t_nickname != NULL && strcasecmp(targ->t_nickname, nickname) == 0) return (targ); } return (NULL); } struct target * target_new(struct conf *conf) { struct target *targ; targ = calloc(1, sizeof(*targ)); if (targ == NULL) xo_err(1, "calloc"); targ->t_conf = conf; TAILQ_INSERT_TAIL(&conf->conf_targets, targ, t_next); return (targ); } void target_delete(struct target *targ) { TAILQ_REMOVE(&targ->t_conf->conf_targets, targ, t_next); free(targ); } static char * default_initiator_name(void) { char *name; size_t namelen; int error; namelen = _POSIX_HOST_NAME_MAX + strlen(DEFAULT_IQN); name = calloc(1, namelen + 1); if (name == NULL) xo_err(1, "calloc"); strcpy(name, DEFAULT_IQN); error = gethostname(name + strlen(DEFAULT_IQN), namelen - strlen(DEFAULT_IQN)); if (error != 0) xo_err(1, "gethostname"); return (name); } static bool valid_hex(const char ch) { switch (ch) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'A': case 'b': case 'B': case 'c': case 'C': case 'd': case 'D': case 'e': case 'E': case 'f': case 'F': return (true); default: return (false); } } int parse_enable(const char *enable) { if (enable == NULL) return (ENABLE_UNSPECIFIED); if (strcasecmp(enable, "on") == 0 || strcasecmp(enable, "yes") == 0) return (ENABLE_ON); if (strcasecmp(enable, "off") == 0 || strcasecmp(enable, "no") == 0) return (ENABLE_OFF); return (ENABLE_UNSPECIFIED); } bool valid_iscsi_name(const char *name) { int i; if (strlen(name) >= MAX_NAME_LEN) { xo_warnx("overlong name for \"%s\"; max length allowed " "by iSCSI specification is %d characters", name, MAX_NAME_LEN); return (false); } /* * In the cases below, we don't return an error, just in case the admin * was right, and we're wrong. */ if (strncasecmp(name, "iqn.", strlen("iqn.")) == 0) { for (i = strlen("iqn."); name[i] != '\0'; i++) { /* * XXX: We should verify UTF-8 normalisation, as defined * by 3.2.6.2: iSCSI Name Encoding. */ if (isalnum(name[i])) continue; if (name[i] == '-' || name[i] == '.' || name[i] == ':') continue; xo_warnx("invalid character \"%c\" in iSCSI name " "\"%s\"; allowed characters are letters, digits, " "'-', '.', and ':'", name[i], name); break; } /* * XXX: Check more stuff: valid date and a valid reversed domain. 
*/ } else if (strncasecmp(name, "eui.", strlen("eui.")) == 0) { if (strlen(name) != strlen("eui.") + 16) xo_warnx("invalid iSCSI name \"%s\"; the \"eui.\" " "should be followed by exactly 16 hexadecimal " "digits", name); for (i = strlen("eui."); name[i] != '\0'; i++) { if (!valid_hex(name[i])) { xo_warnx("invalid character \"%c\" in iSCSI " "name \"%s\"; allowed characters are 0-9 " "and A-F", name[i], name); break; } } } else if (strncasecmp(name, "naa.", strlen("naa.")) == 0) { if (strlen(name) > strlen("naa.") + 32) xo_warnx("invalid iSCSI name \"%s\"; the \"naa.\" " "should be followed by at most 32 hexadecimal " "digits", name); for (i = strlen("naa."); name[i] != '\0'; i++) { if (!valid_hex(name[i])) { xo_warnx("invalid character \"%c\" in iSCSI " "name \"%s\"; allowed characters are 0-9 " "and A-F", name[i], name); break; } } } else { xo_warnx("invalid iSCSI name \"%s\"; should start with " "either \"iqn.\", \"eui.\", or \"naa.\"", name); } return (true); } void conf_verify(struct conf *conf) { struct target *targ; TAILQ_FOREACH(targ, &conf->conf_targets, t_next) { assert(targ->t_nickname != NULL); if (targ->t_session_type == SESSION_TYPE_UNSPECIFIED) targ->t_session_type = SESSION_TYPE_NORMAL; if (targ->t_session_type == SESSION_TYPE_NORMAL && targ->t_name == NULL) xo_errx(1, "missing TargetName for target \"%s\"", targ->t_nickname); if (targ->t_session_type == SESSION_TYPE_DISCOVERY && targ->t_name != NULL) xo_errx(1, "cannot specify TargetName for discovery " "sessions for target \"%s\"", targ->t_nickname); if (targ->t_name != NULL) { if (valid_iscsi_name(targ->t_name) == false) xo_errx(1, "invalid target name \"%s\"", targ->t_name); } if (targ->t_protocol == PROTOCOL_UNSPECIFIED) targ->t_protocol = PROTOCOL_ISCSI; if (targ->t_address == NULL) xo_errx(1, "missing TargetAddress for target \"%s\"", targ->t_nickname); if (targ->t_initiator_name == NULL) targ->t_initiator_name = default_initiator_name(); if (valid_iscsi_name(targ->t_initiator_name) == false) xo_errx(1, "invalid initiator name \"%s\"", targ->t_initiator_name); if (targ->t_header_digest == DIGEST_UNSPECIFIED) targ->t_header_digest = DIGEST_NONE; if (targ->t_data_digest == DIGEST_UNSPECIFIED) targ->t_data_digest = DIGEST_NONE; if (targ->t_auth_method == AUTH_METHOD_UNSPECIFIED) { if (targ->t_user != NULL || targ->t_secret != NULL || targ->t_mutual_user != NULL || targ->t_mutual_secret != NULL) targ->t_auth_method = AUTH_METHOD_CHAP; else targ->t_auth_method = AUTH_METHOD_NONE; } if (targ->t_auth_method == AUTH_METHOD_CHAP) { if (targ->t_user == NULL) { xo_errx(1, "missing chapIName for target \"%s\"", targ->t_nickname); } if (targ->t_secret == NULL) xo_errx(1, "missing chapSecret for target \"%s\"", targ->t_nickname); if (targ->t_mutual_user != NULL || targ->t_mutual_secret != NULL) { if (targ->t_mutual_user == NULL) xo_errx(1, "missing tgtChapName for " "target \"%s\"", targ->t_nickname); if (targ->t_mutual_secret == NULL) xo_errx(1, "missing tgtChapSecret for " "target \"%s\"", targ->t_nickname); } } } } static void conf_from_target(struct iscsi_session_conf *conf, const struct target *targ) { memset(conf, 0, sizeof(*conf)); /* * XXX: Check bounds and return error instead of silently truncating.
*/ if (targ->t_initiator_name != NULL) strlcpy(conf->isc_initiator, targ->t_initiator_name, sizeof(conf->isc_initiator)); if (targ->t_initiator_address != NULL) strlcpy(conf->isc_initiator_addr, targ->t_initiator_address, sizeof(conf->isc_initiator_addr)); if (targ->t_initiator_alias != NULL) strlcpy(conf->isc_initiator_alias, targ->t_initiator_alias, sizeof(conf->isc_initiator_alias)); if (targ->t_name != NULL) strlcpy(conf->isc_target, targ->t_name, sizeof(conf->isc_target)); if (targ->t_address != NULL) strlcpy(conf->isc_target_addr, targ->t_address, sizeof(conf->isc_target_addr)); if (targ->t_user != NULL) strlcpy(conf->isc_user, targ->t_user, sizeof(conf->isc_user)); if (targ->t_secret != NULL) strlcpy(conf->isc_secret, targ->t_secret, sizeof(conf->isc_secret)); if (targ->t_mutual_user != NULL) strlcpy(conf->isc_mutual_user, targ->t_mutual_user, sizeof(conf->isc_mutual_user)); if (targ->t_mutual_secret != NULL) strlcpy(conf->isc_mutual_secret, targ->t_mutual_secret, sizeof(conf->isc_mutual_secret)); if (targ->t_session_type == SESSION_TYPE_DISCOVERY) conf->isc_discovery = 1; if (targ->t_enable != ENABLE_OFF) conf->isc_enable = 1; if (targ->t_protocol == PROTOCOL_ISER) conf->isc_iser = 1; if (targ->t_offload != NULL) strlcpy(conf->isc_offload, targ->t_offload, sizeof(conf->isc_offload)); if (targ->t_header_digest == DIGEST_CRC32C) conf->isc_header_digest = ISCSI_DIGEST_CRC32C; else conf->isc_header_digest = ISCSI_DIGEST_NONE; if (targ->t_data_digest == DIGEST_CRC32C) conf->isc_data_digest = ISCSI_DIGEST_CRC32C; else conf->isc_data_digest = ISCSI_DIGEST_NONE; } static int kernel_add(int iscsi_fd, const struct target *targ) { struct iscsi_session_add isa; int error; memset(&isa, 0, sizeof(isa)); conf_from_target(&isa.isa_conf, targ); error = ioctl(iscsi_fd, ISCSISADD, &isa); if (error != 0) xo_warn("ISCSISADD"); return (error); } static int kernel_modify(int iscsi_fd, unsigned int session_id, const struct target *targ) { struct iscsi_session_modify ism; int error; memset(&ism, 0, sizeof(ism)); ism.ism_session_id = session_id; conf_from_target(&ism.ism_conf, targ); error = ioctl(iscsi_fd, ISCSISMODIFY, &ism); if (error != 0) xo_warn("ISCSISMODIFY"); return (error); } static void kernel_modify_some(int iscsi_fd, unsigned int session_id, const char *target, const char *target_addr, const char *user, const char *secret, int enable) { struct iscsi_session_state *states = NULL; struct iscsi_session_state *state; struct iscsi_session_conf *conf; struct iscsi_session_list isl; struct iscsi_session_modify ism; unsigned int i, nentries = 1; int error; for (;;) { states = realloc(states, nentries * sizeof(struct iscsi_session_state)); if (states == NULL) xo_err(1, "realloc"); memset(&isl, 0, sizeof(isl)); isl.isl_nentries = nentries; isl.isl_pstates = states; error = ioctl(iscsi_fd, ISCSISLIST, &isl); if (error != 0 && errno == EMSGSIZE) { nentries *= 4; continue; } break; } if (error != 0) xo_errx(1, "ISCSISLIST"); for (i = 0; i < isl.isl_nentries; i++) { state = &states[i]; if (state->iss_id == session_id) break; } if (i == isl.isl_nentries) xo_errx(1, "session-id %u not found", session_id); conf = &state->iss_conf; if (target != NULL) strlcpy(conf->isc_target, target, sizeof(conf->isc_target)); if (target_addr != NULL) strlcpy(conf->isc_target_addr, target_addr, sizeof(conf->isc_target_addr)); if (user != NULL) strlcpy(conf->isc_user, user, sizeof(conf->isc_user)); if (secret != NULL) strlcpy(conf->isc_secret, secret, sizeof(conf->isc_secret)); if (enable == ENABLE_ON) conf->isc_enable = 1; else 
if (enable == ENABLE_OFF) conf->isc_enable = 0; memset(&ism, 0, sizeof(ism)); ism.ism_session_id = session_id; memcpy(&ism.ism_conf, conf, sizeof(ism.ism_conf)); error = ioctl(iscsi_fd, ISCSISMODIFY, &ism); if (error != 0) xo_warn("ISCSISMODIFY"); } static int kernel_remove(int iscsi_fd, const struct target *targ) { struct iscsi_session_remove isr; int error; memset(&isr, 0, sizeof(isr)); conf_from_target(&isr.isr_conf, targ); error = ioctl(iscsi_fd, ISCSISREMOVE, &isr); if (error != 0) xo_warn("ISCSISREMOVE"); return (error); } /* * XXX: Add filtering. */ static int kernel_list(int iscsi_fd, const struct target *targ __unused, int verbose) { struct iscsi_session_state *states = NULL; const struct iscsi_session_state *state; const struct iscsi_session_conf *conf; struct iscsi_session_list isl; unsigned int i, nentries = 1; int error; for (;;) { states = realloc(states, nentries * sizeof(struct iscsi_session_state)); if (states == NULL) xo_err(1, "realloc"); memset(&isl, 0, sizeof(isl)); isl.isl_nentries = nentries; isl.isl_pstates = states; error = ioctl(iscsi_fd, ISCSISLIST, &isl); if (error != 0 && errno == EMSGSIZE) { nentries *= 4; continue; } break; } if (error != 0) { xo_warn("ISCSISLIST"); return (error); } if (verbose != 0) { xo_open_list("session"); for (i = 0; i < isl.isl_nentries; i++) { state = &states[i]; conf = &state->iss_conf; xo_open_instance("session"); /* * Display-only modifier as this information * is also present within the 'session' container */ - xo_emit("{L:/%-25s}{V:sessionId/%u}\n", + xo_emit("{L:/%-26s}{V:sessionId/%u}\n", "Session ID:", state->iss_id); xo_open_container("initiator"); - xo_emit("{L:/%-25s}{V:name/%s}\n", + xo_emit("{L:/%-26s}{V:name/%s}\n", "Initiator name:", conf->isc_initiator); - xo_emit("{L:/%-25s}{V:portal/%s}\n", + xo_emit("{L:/%-26s}{V:portal/%s}\n", "Initiator portal:", conf->isc_initiator_addr); - xo_emit("{L:/%-25s}{V:alias/%s}\n", + xo_emit("{L:/%-26s}{V:alias/%s}\n", "Initiator alias:", conf->isc_initiator_alias); xo_close_container("initiator"); xo_open_container("target"); - xo_emit("{L:/%-25s}{V:name/%s}\n", + xo_emit("{L:/%-26s}{V:name/%s}\n", "Target name:", conf->isc_target); - xo_emit("{L:/%-25s}{V:portal/%s}\n", + xo_emit("{L:/%-26s}{V:portal/%s}\n", "Target portal:", conf->isc_target_addr); - xo_emit("{L:/%-25s}{V:alias/%s}\n", + xo_emit("{L:/%-26s}{V:alias/%s}\n", "Target alias:", state->iss_target_alias); xo_close_container("target"); xo_open_container("auth"); - xo_emit("{L:/%-25s}{V:user/%s}\n", + xo_emit("{L:/%-26s}{V:user/%s}\n", "User:", conf->isc_user); - xo_emit("{L:/%-25s}{V:secret/%s}\n", + xo_emit("{L:/%-26s}{V:secret/%s}\n", "Secret:", conf->isc_secret); - xo_emit("{L:/%-25s}{V:mutualUser/%s}\n", + xo_emit("{L:/%-26s}{V:mutualUser/%s}\n", "Mutual user:", conf->isc_mutual_user); - xo_emit("{L:/%-25s}{V:mutualSecret/%s}\n", + xo_emit("{L:/%-26s}{V:mutualSecret/%s}\n", "Mutual secret:", conf->isc_mutual_secret); xo_close_container("auth"); - xo_emit("{L:/%-25s}{V:type/%s}\n", + xo_emit("{L:/%-26s}{V:type/%s}\n", "Session type:", conf->isc_discovery ? "Discovery" : "Normal"); - xo_emit("{L:/%-25s}{V:enable/%s}\n", + xo_emit("{L:/%-26s}{V:enable/%s}\n", "Enable:", conf->isc_enable ? "Yes" : "No"); - xo_emit("{L:/%-25s}{V:state/%s}\n", + xo_emit("{L:/%-26s}{V:state/%s}\n", "Session state:", state->iss_connected ? 
"Connected" : "Disconnected"); - xo_emit("{L:/%-25s}{V:failureReason/%s}\n", + xo_emit("{L:/%-26s}{V:failureReason/%s}\n", "Failure reason:", state->iss_reason); - xo_emit("{L:/%-25s}{V:headerDigest/%s}\n", + xo_emit("{L:/%-26s}{V:headerDigest/%s}\n", "Header digest:", state->iss_header_digest == ISCSI_DIGEST_CRC32C ? "CRC32C" : "None"); - xo_emit("{L:/%-25s}{V:dataDigest/%s}\n", + xo_emit("{L:/%-26s}{V:dataDigest/%s}\n", "Data digest:", state->iss_data_digest == ISCSI_DIGEST_CRC32C ? "CRC32C" : "None"); - xo_emit("{L:/%-25s}{V:recvDataSegmentLen/%d}\n", + xo_emit("{L:/%-26s}{V:recvDataSegmentLen/%d}\n", "MaxRecvDataSegmentLength:", state->iss_max_recv_data_segment_length); - xo_emit("{L:/%-25s}{V:sendDataSegmentLen/%d}\n", + xo_emit("{L:/%-26s}{V:sendDataSegmentLen/%d}\n", "MaxSendDataSegmentLength:", state->iss_max_send_data_segment_length); - xo_emit("{L:/%-25s}{V:maxBurstLen/%d}\n", + xo_emit("{L:/%-26s}{V:maxBurstLen/%d}\n", "MaxBurstLen:", state->iss_max_burst_length); - xo_emit("{L:/%-25s}{V:firstBurstLen/%d}\n", + xo_emit("{L:/%-26s}{V:firstBurstLen/%d}\n", "FirstBurstLen:", state->iss_first_burst_length); - xo_emit("{L:/%-25s}{V:immediateData/%s}\n", + xo_emit("{L:/%-26s}{V:immediateData/%s}\n", "ImmediateData:", state->iss_immediate_data ? "Yes" : "No"); - xo_emit("{L:/%-25s}{V:iSER/%s}\n", + xo_emit("{L:/%-26s}{V:iSER/%s}\n", "iSER (RDMA):", conf->isc_iser ? "Yes" : "No"); - xo_emit("{L:/%-25s}{V:offloadDriver/%s}\n", + xo_emit("{L:/%-26s}{V:offloadDriver/%s}\n", "Offload driver:", state->iss_offload); - xo_emit("{L:/%-25s}", + xo_emit("{L:/%-26s}", "Device nodes:"); print_periphs(state->iss_id); xo_emit("\n\n"); xo_close_instance("session"); } xo_close_list("session"); } else { xo_emit("{T:/%-36s} {T:/%-16s} {T:/%s}\n", "Target name", "Target portal", "State"); if (isl.isl_nentries != 0) xo_open_list("session"); for (i = 0; i < isl.isl_nentries; i++) { state = &states[i]; conf = &state->iss_conf; xo_open_instance("session"); xo_emit("{V:name/%-36s/%s} {V:portal/%-16s/%s} ", conf->isc_target, conf->isc_target_addr); if (state->iss_reason[0] != '\0') { xo_emit("{V:state/%s}\n", state->iss_reason); } else { if (conf->isc_discovery) { xo_emit("{V:state}\n", "Discovery"); } else if (conf->isc_enable == 0) { xo_emit("{V:state}\n", "Disabled"); } else if (state->iss_connected) { xo_emit("{V:state}: ", "Connected"); print_periphs(state->iss_id); xo_emit("\n"); } else { xo_emit("{V:state}\n", "Disconnected"); } } xo_close_instance("session"); } if (isl.isl_nentries != 0) xo_close_list("session"); } return (0); } static int kernel_wait(int iscsi_fd, int timeout) { struct iscsi_session_state *states = NULL; const struct iscsi_session_state *state; struct iscsi_session_list isl; unsigned int i, nentries = 1; bool all_connected; int error; for (;;) { for (;;) { states = realloc(states, nentries * sizeof(struct iscsi_session_state)); if (states == NULL) xo_err(1, "realloc"); memset(&isl, 0, sizeof(isl)); isl.isl_nentries = nentries; isl.isl_pstates = states; error = ioctl(iscsi_fd, ISCSISLIST, &isl); if (error != 0 && errno == EMSGSIZE) { nentries *= 4; continue; } break; } if (error != 0) { xo_warn("ISCSISLIST"); return (error); } all_connected = true; for (i = 0; i < isl.isl_nentries; i++) { state = &states[i]; if (!state->iss_connected) { all_connected = false; break; } } if (all_connected) return (0); sleep(1); if (timeout > 0) { timeout--; if (timeout == 0) return (1); } } } static void usage(void) { fprintf(stderr, "usage: iscsictl -A -p portal -t target " "[-u user -s secret] [-w 
timeout] [-e on | off]\n"); fprintf(stderr, " iscsictl -A -d discovery-host " "[-u user -s secret] [-e on | off]\n"); fprintf(stderr, " iscsictl -A -a [-c path]\n"); fprintf(stderr, " iscsictl -A -n nickname [-c path]\n"); fprintf(stderr, " iscsictl -M -i session-id [-p portal] " "[-t target] [-u user] [-s secret] [-e on | off]\n"); fprintf(stderr, " iscsictl -M -i session-id -n nickname " "[-c path]\n"); fprintf(stderr, " iscsictl -R [-p portal] [-t target]\n"); fprintf(stderr, " iscsictl -R -a\n"); fprintf(stderr, " iscsictl -R -n nickname [-c path]\n"); fprintf(stderr, " iscsictl -L [-v] [-w timeout]\n"); exit(1); } int main(int argc, char **argv) { int Aflag = 0, Mflag = 0, Rflag = 0, Lflag = 0, aflag = 0, rflag = 0, vflag = 0; const char *conf_path = DEFAULT_CONFIG_PATH; char *nickname = NULL, *discovery_host = NULL, *portal = NULL, *target = NULL, *user = NULL, *secret = NULL; int timeout = -1, enable = ENABLE_UNSPECIFIED; long long session_id = -1; char *end; int ch, error, iscsi_fd, retval, saved_errno; int failed = 0; struct conf *conf; struct target *targ; argc = xo_parse_args(argc, argv); xo_open_container("iscsictl"); while ((ch = getopt(argc, argv, "AMRLac:d:e:i:n:p:rt:u:s:vw:")) != -1) { switch (ch) { case 'A': Aflag = 1; break; case 'M': Mflag = 1; break; case 'R': Rflag = 1; break; case 'L': Lflag = 1; break; case 'a': aflag = 1; break; case 'c': conf_path = optarg; break; case 'd': discovery_host = optarg; break; case 'e': enable = parse_enable(optarg); if (enable == ENABLE_UNSPECIFIED) { xo_errx(1, "invalid argument to -e, " "must be either \"on\" or \"off\""); } break; case 'i': session_id = strtol(optarg, &end, 10); if ((size_t)(end - optarg) != strlen(optarg)) xo_errx(1, "trailing characters after session-id"); if (session_id < 0) xo_errx(1, "session-id cannot be negative"); if (session_id > UINT_MAX) xo_errx(1, "session-id cannot be greater than %u", UINT_MAX); break; case 'n': nickname = optarg; break; case 'p': portal = optarg; break; case 'r': rflag = 1; break; case 't': target = optarg; break; case 'u': user = optarg; break; case 's': secret = optarg; break; case 'v': vflag = 1; break; case 'w': timeout = strtol(optarg, &end, 10); if ((size_t)(end - optarg) != strlen(optarg)) xo_errx(1, "trailing characters after timeout"); if (timeout < 0) xo_errx(1, "timeout cannot be negative"); break; case '?': default: usage(); } } argc -= optind; if (argc != 0) usage(); if (Aflag + Mflag + Rflag + Lflag == 0) Lflag = 1; if (Aflag + Mflag + Rflag + Lflag > 1) xo_errx(1, "at most one of -A, -M, -R, or -L may be specified"); /* * Note that we ignore unnecessary/inapplicable "-c" flag; so that * people can do something like "alias ISCSICTL="iscsictl -c path" * in shell scripts. 
*/ if (Aflag != 0) { if (aflag != 0) { if (enable != ENABLE_UNSPECIFIED) xo_errx(1, "-a and -e are mutually exclusive"); if (portal != NULL) xo_errx(1, "-a and -p are mutually exclusive"); if (target != NULL) xo_errx(1, "-a and -t are mutually exclusive"); if (user != NULL) xo_errx(1, "-a and -u are mutually exclusive"); if (secret != NULL) xo_errx(1, "-a and -s are mutually exclusive"); if (nickname != NULL) xo_errx(1, "-a and -n are mutually exclusive"); if (discovery_host != NULL) xo_errx(1, "-a and -d are mutually exclusive"); if (rflag != 0) xo_errx(1, "-a and -r are mutually exclusive"); } else if (nickname != NULL) { if (enable != ENABLE_UNSPECIFIED) xo_errx(1, "-n and -e are mutually exclusive"); if (portal != NULL) xo_errx(1, "-n and -p are mutually exclusive"); if (target != NULL) xo_errx(1, "-n and -t are mutually exclusive"); if (user != NULL) xo_errx(1, "-n and -u are mutually exclusive"); if (secret != NULL) xo_errx(1, "-n and -s are mutually exclusive"); if (discovery_host != NULL) xo_errx(1, "-n and -d are mutually exclusive"); if (rflag != 0) xo_errx(1, "-n and -r are mutually exclusive"); } else if (discovery_host != NULL) { if (portal != NULL) xo_errx(1, "-d and -p are mutually exclusive"); if (target != NULL) xo_errx(1, "-d and -t are mutually exclusive"); } else { if (target == NULL && portal == NULL) xo_errx(1, "must specify -a, -n or -t/-p"); if (target != NULL && portal == NULL) xo_errx(1, "-t must always be used with -p"); if (portal != NULL && target == NULL) xo_errx(1, "-p must always be used with -t"); } if (user != NULL && secret == NULL) xo_errx(1, "-u must always be used with -s"); if (secret != NULL && user == NULL) xo_errx(1, "-s must always be used with -u"); if (session_id != -1) xo_errx(1, "-i cannot be used with -A"); if (vflag != 0) xo_errx(1, "-v cannot be used with -A"); } else if (Mflag != 0) { if (session_id == -1) xo_errx(1, "-M requires -i"); if (nickname != NULL) { if (enable != ENABLE_UNSPECIFIED) xo_errx(1, "-n and -e are mutually exclusive"); if (portal != NULL) xo_errx(1, "-n and -p are mutually exclusive"); if (target != NULL) xo_errx(1, "-n and -t are mutually exclusive"); if (user != NULL) xo_errx(1, "-n and -u are mutually exclusive"); if (secret != NULL) xo_errx(1, "-n and -s are mutually exclusive"); } if (aflag != 0) xo_errx(1, "-a cannot be used with -M"); if (discovery_host != NULL) xo_errx(1, "-d cannot be used with -M"); if (rflag != 0) xo_errx(1, "-r cannot be used with -M"); if (vflag != 0) xo_errx(1, "-v cannot be used with -M"); if (timeout != -1) xo_errx(1, "-w cannot be used with -M"); } else if (Rflag != 0) { if (aflag != 0) { if (portal != NULL) xo_errx(1, "-a and -p are mutually exclusive"); if (target != NULL) xo_errx(1, "-a and -t are mutually exclusive"); if (nickname != NULL) xo_errx(1, "-a and -n are mutually exclusive"); } else if (nickname != NULL) { if (portal != NULL) xo_errx(1, "-n and -p are mutually exclusive"); if (target != NULL) xo_errx(1, "-n and -t are mutually exclusive"); } else if (target == NULL && portal == NULL) { xo_errx(1, "must specify either -a, -n, -t, or -p"); } if (discovery_host != NULL) xo_errx(1, "-d cannot be used with -R"); if (enable != ENABLE_UNSPECIFIED) xo_errx(1, "-e cannot be used with -R"); if (session_id != -1) xo_errx(1, "-i cannot be used with -R"); if (rflag != 0) xo_errx(1, "-r cannot be used with -R"); if (user != NULL) xo_errx(1, "-u cannot be used with -R"); if (secret != NULL) xo_errx(1, "-s cannot be used with -R"); if (vflag != 0) xo_errx(1, "-v cannot be used with 
-R"); if (timeout != -1) xo_errx(1, "-w cannot be used with -R"); } else { assert(Lflag != 0); if (discovery_host != NULL) xo_errx(1, "-d cannot be used with -L"); if (session_id != -1) xo_errx(1, "-i cannot be used with -L"); if (nickname != NULL) xo_errx(1, "-n cannot be used with -L"); if (portal != NULL) xo_errx(1, "-p cannot be used with -L"); if (rflag != 0) xo_errx(1, "-r cannot be used with -L"); if (target != NULL) xo_errx(1, "-t cannot be used with -L"); if (user != NULL) xo_errx(1, "-u cannot be used with -L"); if (secret != NULL) xo_errx(1, "-s cannot be used with -L"); } iscsi_fd = open(ISCSI_PATH, O_RDWR); if (iscsi_fd < 0 && errno == ENOENT) { saved_errno = errno; retval = kldload("iscsi"); if (retval != -1) iscsi_fd = open(ISCSI_PATH, O_RDWR); else errno = saved_errno; } if (iscsi_fd < 0) xo_err(1, "failed to open %s", ISCSI_PATH); if (Aflag != 0 && aflag != 0) { conf = conf_new_from_file(conf_path); TAILQ_FOREACH(targ, &conf->conf_targets, t_next) failed += kernel_add(iscsi_fd, targ); } else if (nickname != NULL) { conf = conf_new_from_file(conf_path); targ = target_find(conf, nickname); if (targ == NULL) xo_errx(1, "target %s not found in %s", nickname, conf_path); if (Aflag != 0) failed += kernel_add(iscsi_fd, targ); else if (Mflag != 0) failed += kernel_modify(iscsi_fd, session_id, targ); else if (Rflag != 0) failed += kernel_remove(iscsi_fd, targ); else failed += kernel_list(iscsi_fd, targ, vflag); } else if (Mflag != 0) { kernel_modify_some(iscsi_fd, session_id, target, portal, user, secret, enable); } else { if (Aflag != 0 && target != NULL) { if (valid_iscsi_name(target) == false) xo_errx(1, "invalid target name \"%s\"", target); } conf = conf_new(); targ = target_new(conf); targ->t_initiator_name = default_initiator_name(); targ->t_header_digest = DIGEST_NONE; targ->t_data_digest = DIGEST_NONE; targ->t_name = target; if (discovery_host != NULL) { targ->t_session_type = SESSION_TYPE_DISCOVERY; targ->t_address = discovery_host; } else { targ->t_session_type = SESSION_TYPE_NORMAL; targ->t_address = portal; } targ->t_enable = enable; if (rflag != 0) targ->t_protocol = PROTOCOL_ISER; targ->t_user = user; targ->t_secret = secret; if (Aflag != 0) failed += kernel_add(iscsi_fd, targ); else if (Rflag != 0) failed += kernel_remove(iscsi_fd, targ); else failed += kernel_list(iscsi_fd, targ, vflag); } if (timeout != -1) failed += kernel_wait(iscsi_fd, timeout); error = close(iscsi_fd); if (error != 0) xo_err(1, "close"); xo_close_container("iscsictl"); xo_finish(); if (failed != 0) return (1); return (0); } Index: projects/clang400-import/usr.bin/mail/popen.c =================================================================== --- projects/clang400-import/usr.bin/mail/popen.c (revision 312719) +++ projects/clang400-import/usr.bin/mail/popen.c (revision 312720) @@ -1,410 +1,412 @@ /* * Copyright (c) 1980, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint #if 0 static char sccsid[] = "@(#)popen.c 8.1 (Berkeley) 6/6/93"; #endif #endif /* not lint */ #include __FBSDID("$FreeBSD$"); #include "rcv.h" #include #include #include #include #include "extern.h" #define READ 0 #define WRITE 1 struct fp { FILE *fp; int pipe; pid_t pid; struct fp *link; }; static struct fp *fp_head; struct child { pid_t pid; char done; char free; int status; struct child *link; }; static struct child *child, *child_freelist = NULL; static void delchild(struct child *); static pid_t file_pid(FILE *); static pid_t start_commandv(char *, sigset_t *, int, int, va_list); FILE * Fopen(const char *path, const char *mode) { FILE *fp; if ((fp = fopen(path, mode)) != NULL) { register_file(fp, 0, 0); (void)fcntl(fileno(fp), F_SETFD, 1); } return (fp); } FILE * Fdopen(int fd, const char *mode) { FILE *fp; if ((fp = fdopen(fd, mode)) != NULL) { register_file(fp, 0, 0); (void)fcntl(fileno(fp), F_SETFD, 1); } return (fp); } int Fclose(FILE *fp) { unregister_file(fp); return (fclose(fp)); } FILE * Popen(char *cmd, const char *mode) { int p[2]; int myside, hisside, fd0, fd1; pid_t pid; sigset_t nset; FILE *fp; if (pipe(p) < 0) return (NULL); (void)fcntl(p[READ], F_SETFD, 1); (void)fcntl(p[WRITE], F_SETFD, 1); if (*mode == 'r') { myside = p[READ]; hisside = fd0 = fd1 = p[WRITE]; } else { myside = p[WRITE]; hisside = fd0 = p[READ]; fd1 = -1; } (void)sigemptyset(&nset); pid = start_command(value("SHELL"), &nset, fd0, fd1, "-c", cmd, NULL); if (pid < 0) { (void)close(p[READ]); (void)close(p[WRITE]); return (NULL); } (void)close(hisside); if ((fp = fdopen(myside, mode)) != NULL) register_file(fp, 1, pid); return (fp); } int Pclose(FILE *ptr) { int i; sigset_t nset, oset; i = file_pid(ptr); unregister_file(ptr); (void)fclose(ptr); (void)sigemptyset(&nset); (void)sigaddset(&nset, SIGINT); (void)sigaddset(&nset, SIGHUP); (void)sigprocmask(SIG_BLOCK, &nset, &oset); i = wait_child(i); (void)sigprocmask(SIG_SETMASK, &oset, NULL); return (i); } void close_all_files(void) { while (fp_head != NULL) if (fp_head->pipe) (void)Pclose(fp_head->fp); else (void)Fclose(fp_head->fp); } void register_file(FILE *fp, int pipe, pid_t pid) { struct fp *fpp; if ((fpp = malloc(sizeof(*fpp))) == NULL) err(1, "Out of memory"); fpp->fp = fp; fpp->pipe = pipe; fpp->pid = pid; fpp->link = fp_head; fp_head = fpp; } void unregister_file(FILE *fp) { struct fp **pp, *p; for (pp = &fp_head; (p = *pp) != NULL; pp = &p->link) if (p->fp == fp) { *pp = p->link; (void)free(p); return; } errx(1, "Invalid file pointer"); /*NOTREACHED*/ } pid_t file_pid(FILE *fp) { struct fp *p; for (p = 
fp_head; p != NULL; p = p->link) if (p->fp == fp) return (p->pid); errx(1, "Invalid file pointer"); /*NOTREACHED*/ } /* * Run a command without a shell, with optional arguments and splicing * of stdin (-1 means none) and stdout. The command name can be a sequence * of words. * Signals must be handled by the caller. * "nset" contains the signals to ignore in the new process. * SIGINT is enabled unless it's in "nset". */ static pid_t start_commandv(char *cmd, sigset_t *nset, int infd, int outfd, va_list args) { pid_t pid; if ((pid = fork()) < 0) { warn("fork"); return (-1); } if (pid == 0) { char *argv[100]; int i = getrawlist(cmd, argv, sizeof(argv) / sizeof(*argv)); while ((argv[i++] = va_arg(args, char *))) ; argv[i] = NULL; prepare_child(nset, infd, outfd); execvp(argv[0], argv); warn("%s", argv[0]); _exit(1); } return (pid); } int run_command(char *cmd, sigset_t *nset, int infd, int outfd, ...) { pid_t pid; va_list args; va_start(args, outfd); pid = start_commandv(cmd, nset, infd, outfd, args); va_end(args); if (pid < 0) return -1; return wait_command(pid); } int start_command(char *cmd, sigset_t *nset, int infd, int outfd, ...) { va_list args; int r; va_start(args, outfd); r = start_commandv(cmd, nset, infd, outfd, args); va_end(args); return r; } void prepare_child(sigset_t *nset, int infd, int outfd) { int i; sigset_t eset; /* * All file descriptors other than 0, 1, and 2 are supposed to be * close-on-exec. */ if (infd >= 0) dup2(infd, 0); if (outfd >= 0) dup2(outfd, 1); for (i = 1; i < NSIG; i++) if (nset != NULL && sigismember(nset, i)) (void)signal(i, SIG_IGN); if (nset == NULL || !sigismember(nset, SIGINT)) (void)signal(SIGINT, SIG_DFL); (void)sigemptyset(&eset); (void)sigprocmask(SIG_SETMASK, &eset, NULL); } int wait_command(pid_t pid) { if (wait_child(pid) < 0) { printf("Fatal error in process.\n"); return (-1); } return (0); } static struct child * findchild(pid_t pid, int dont_alloc) { struct child **cpp; for (cpp = &child; *cpp != NULL && (*cpp)->pid != pid; cpp = &(*cpp)->link) ; if (*cpp == NULL) { - if (dont_alloc) + if (dont_alloc) return(NULL); if (child_freelist) { *cpp = child_freelist; child_freelist = (*cpp)->link; } else { *cpp = malloc(sizeof(struct child)); if (*cpp == NULL) err(1, "malloc"); } (*cpp)->pid = pid; (*cpp)->done = (*cpp)->free = 0; (*cpp)->link = NULL; } return (*cpp); } static void delchild(struct child *cp) { struct child **cpp; for (cpp = &child; *cpp != cp; cpp = &(*cpp)->link) ; *cpp = cp->link; cp->link = child_freelist; child_freelist = cp; } /*ARGSUSED*/ void sigchild(int signo __unused) { pid_t pid; int status; struct child *cp; int save_errno; save_errno = errno; while ((pid = waitpid(-1, &status, WNOHANG)) > 0) { cp = findchild(pid, 1); + if (cp == NULL) + continue; if (cp->free) delchild(cp); else { cp->done = 1; cp->status = status; } } errno = save_errno; } int wait_status; /* * Wait for a specific child to die. */ int wait_child(pid_t pid) { struct child *cp; sigset_t nset, oset; pid_t rv = 0; (void)sigemptyset(&nset); (void)sigaddset(&nset, SIGCHLD); (void)sigprocmask(SIG_BLOCK, &nset, &oset); /* * If we have not already waited on the pid (via sigchild) * wait on it now. Otherwise, use the wait status stashed * by sigchild. 
*/ cp = findchild(pid, 1); if (cp == NULL || !cp->done) rv = waitpid(pid, &wait_status, 0); else wait_status = cp->status; if (cp != NULL) delchild(cp); (void)sigprocmask(SIG_SETMASK, &oset, NULL); if (rv == -1 || (WIFEXITED(wait_status) && WEXITSTATUS(wait_status))) return -1; else return 0; } /* * Mark a child as don't care. */ void free_child(pid_t pid) { struct child *cp; sigset_t nset, oset; (void)sigemptyset(&nset); (void)sigaddset(&nset, SIGCHLD); (void)sigprocmask(SIG_BLOCK, &nset, &oset); if ((cp = findchild(pid, 0)) != NULL) { if (cp->done) delchild(cp); else cp->free = 1; } (void)sigprocmask(SIG_SETMASK, &oset, NULL); } Index: projects/clang400-import/usr.bin/mail/send.c =================================================================== --- projects/clang400-import/usr.bin/mail/send.c (revision 312719) +++ projects/clang400-import/usr.bin/mail/send.c (revision 312720) @@ -1,585 +1,590 @@ /* * Copyright (c) 1980, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint #if 0 static char sccsid[] = "@(#)send.c 8.1 (Berkeley) 6/6/93"; #endif #endif /* not lint */ #include __FBSDID("$FreeBSD$"); #include "rcv.h" #include "extern.h" /* * Mail -- a mail program * * Mail to others. */ /* * Send message described by the passed pointer to the * passed output buffer. Return -1 on error. * Adjust the status: field if need be. * If doign is given, suppress ignored header fields. * prefix is a string to prepend to each output line. */ int sendmessage(struct message *mp, FILE *obuf, struct ignoretab *doign, char *prefix) { long count; FILE *ibuf; char *cp, *cp2, line[LINESIZE]; int ishead, infld, ignoring, dostat, firstline; - int c, length, prefixlen; + int c = 0, length, prefixlen; /* * Compute the prefix string, without trailing whitespace */ if (prefix != NULL) { cp2 = 0; for (cp = prefix; *cp != '\0'; cp++) if (*cp != ' ' && *cp != '\t') cp2 = cp; prefixlen = cp2 == NULL ? 
0 : cp2 - prefix + 1; } ibuf = setinput(mp); count = mp->m_size; ishead = 1; dostat = doign == 0 || !isign("status", doign); infld = 0; firstline = 1; /* * Process headers first */ while (count > 0 && ishead) { if (fgets(line, sizeof(line), ibuf) == NULL) break; count -= length = strlen(line); if (firstline) { /* * First line is the From line, so no headers * there to worry about */ firstline = 0; ignoring = doign == ignoreall; } else if (line[0] == '\n') { /* * If line is blank, we've reached end of * headers, so force out status: field * and note that we are no longer in header * fields */ if (dostat) { statusput(mp, obuf, prefix); dostat = 0; } ishead = 0; ignoring = doign == ignoreall; } else if (infld && (line[0] == ' ' || line[0] == '\t')) { /* * If this line is a continuation (via space or tab) * of a previous header field, just echo it * (unless the field should be ignored). * In other words, nothing to do. */ } else { /* * Pick up the header field if we have one. */ for (cp = line; (c = *cp++) != '\0' && c != ':' && !isspace((unsigned char)c);) ; cp2 = --cp; while (isspace((unsigned char)*cp++)) ; if (cp[-1] != ':') { /* * Not a header line, force out status: * This happens in uucp style mail where * there are no headers at all. */ if (dostat) { statusput(mp, obuf, prefix); dostat = 0; } if (doign != ignoreall) /* add blank line */ (void)putc('\n', obuf); ishead = 0; ignoring = 0; } else { /* * If it is an ignored field and * we care about such things, skip it. */ *cp2 = '\0'; /* temporarily null terminate */ if (doign && isign(line, doign)) ignoring = 1; else if ((line[0] == 's' || line[0] == 'S') && strcasecmp(line, "status") == 0) { /* * If the field is "status," go compute * and print the real Status: field */ if (dostat) { statusput(mp, obuf, prefix); dostat = 0; } ignoring = 1; } else { ignoring = 0; *cp2 = c; /* restore */ } infld = 1; } } if (!ignoring) { /* * Strip trailing whitespace from prefix * if line is blank. */ if (prefix != NULL) { if (length > 1) fputs(prefix, obuf); else (void)fwrite(prefix, sizeof(*prefix), prefixlen, obuf); } (void)fwrite(line, sizeof(*line), length, obuf); if (ferror(obuf)) return (-1); } } /* * Copy out message body */ if (doign == ignoreall) count--; /* skip final blank line */ if (prefix != NULL) while (count > 0) { if (fgets(line, sizeof(line), ibuf) == NULL) { c = 0; break; } count -= c = strlen(line); /* * Strip trailing whitespace from prefix * if line is blank. */ if (c > 1) fputs(prefix, obuf); else (void)fwrite(prefix, sizeof(*prefix), prefixlen, obuf); (void)fwrite(line, sizeof(*line), c, obuf); if (ferror(obuf)) return (-1); } else while (count > 0) { c = count < LINESIZE ? count : LINESIZE; if ((c = fread(line, sizeof(*line), c, ibuf)) <= 0) break; count -= c; if (fwrite(line, sizeof(*line), c, obuf) != c) return (-1); } if (doign == ignoreall && c > 0 && line[c - 1] != '\n') /* no final blank line */ if ((c = getc(ibuf)) != EOF && putc(c, obuf) == EOF) return (-1); return (0); } /* * Output a reasonable looking status field. */ void statusput(struct message *mp, FILE *obuf, char *prefix) { char statout[3]; char *cp = statout; if (mp->m_flag & MREAD) *cp++ = 'R'; if ((mp->m_flag & MNEW) == 0) *cp++ = 'O'; *cp = '\0'; if (statout[0] != '\0') fprintf(obuf, "%sStatus: %s\n", prefix == NULL ? "" : prefix, statout); } /* * Interface between the argument list and the mail1 routine * which does all the dirty work. 
*/ int mail(struct name *to, struct name *cc, struct name *bcc, struct name *smopts, char *subject, char *replyto) { struct header head; head.h_to = to; head.h_subject = subject; head.h_cc = cc; head.h_bcc = bcc; head.h_smopts = smopts; head.h_replyto = replyto; head.h_inreplyto = NULL; mail1(&head, 0); return (0); } /* * Send mail to a bunch of user names. The interface is through * the mail routine below. */ int sendmail(char *str) { struct header head; head.h_to = extract(str, GTO); head.h_subject = NULL; head.h_cc = NULL; head.h_bcc = NULL; head.h_smopts = NULL; head.h_replyto = value("REPLYTO"); head.h_inreplyto = NULL; mail1(&head, 0); return (0); } /* * Mail a message on standard input to the people indicated * in the passed header. (Internal interface). */ void mail1(struct header *hp, int printheaders) { char *cp; char *nbuf; int pid; char **namelist; struct name *to, *nsto; FILE *mtf; /* * Collect user's mail from standard input. * Get the result as mtf. */ if ((mtf = collect(hp, printheaders)) == NULL) return; if (value("interactive") != NULL) { if (value("askcc") != NULL || value("askbcc") != NULL) { if (value("askcc") != NULL) grabh(hp, GCC); if (value("askbcc") != NULL) grabh(hp, GBCC); } else { printf("EOT\n"); (void)fflush(stdout); } } if (fsize(mtf) == 0) { if (value("dontsendempty") != NULL) goto out; if (hp->h_subject == NULL) printf("No message, no subject; hope that's ok\n"); else printf("Null message body; hope that's ok\n"); } /* * Now, take the user names from the combined * to and cc lists and do all the alias * processing. */ senderr = 0; to = usermap(cat(hp->h_bcc, cat(hp->h_to, hp->h_cc))); if (to == NULL) { printf("No recipients specified\n"); senderr++; } /* * Look through the recipient list for names with /'s * in them which we write to as files directly. */ to = outof(to, mtf, hp); if (senderr) savedeadletter(mtf); to = elide(to); if (count(to) == 0) goto out; if (value("recordrecip") != NULL) { /* * Before fixing the header, save old To:. * We do this because elide above has sorted To: list, and * we would like to save message in a file named by the first * recipient the user has entered, not the one being the first * after sorting happened. */ if ((nsto = malloc(sizeof(struct name))) == NULL) err(1, "Out of memory"); bcopy(hp->h_to, nsto, sizeof(struct name)); } fixhead(hp, to); if ((mtf = infix(hp, mtf)) == NULL) { fprintf(stderr, ". . . message lost, sorry.\n"); return; } namelist = unpack(cat(hp->h_smopts, to)); if (debug) { char **t; printf("Sendmail arguments:"); for (t = namelist; *t != NULL; t++) printf(" \"%s\"", *t); printf("\n"); goto out; } if (value("recordrecip") != NULL) { /* * Extract first recipient username from saved To: and use it * as a filename. */ if ((nbuf = malloc(strlen(detract(nsto, 0)) + 1)) == NULL) err(1, "Out of memory"); if ((cp = yanklogin(detract(nsto, 0), nbuf)) != NULL) (void)savemail(expand(nbuf), mtf); free(nbuf); free(nsto); } else if ((cp = value("record")) != NULL) (void)savemail(expand(cp), mtf); /* * Fork, set up the temporary mail file as standard * input for "mail", and exec with the user list we generated * far above. 
*/ pid = fork(); if (pid == -1) { warn("fork"); savedeadletter(mtf); goto out; } if (pid == 0) { sigset_t nset; (void)sigemptyset(&nset); (void)sigaddset(&nset, SIGHUP); (void)sigaddset(&nset, SIGINT); (void)sigaddset(&nset, SIGQUIT); (void)sigaddset(&nset, SIGTSTP); (void)sigaddset(&nset, SIGTTIN); (void)sigaddset(&nset, SIGTTOU); prepare_child(&nset, fileno(mtf), -1); if ((cp = value("sendmail")) != NULL) cp = expand(cp); else cp = _PATH_SENDMAIL; execv(cp, namelist); warn("%s", cp); _exit(1); } if (value("verbose") != NULL) (void)wait_child(pid); else free_child(pid); out: (void)Fclose(mtf); } /* * Fix the header by glopping all of the expanded names from * the distribution list into the appropriate fields. */ void fixhead(struct header *hp, struct name *tolist) { struct name *np; hp->h_to = NULL; hp->h_cc = NULL; hp->h_bcc = NULL; for (np = tolist; np != NULL; np = np->n_flink) { /* Don't copy deleted addresses to the header */ if (np->n_type & GDEL) continue; if ((np->n_type & GMASK) == GTO) hp->h_to = cat(hp->h_to, nalloc(np->n_name, np->n_type)); else if ((np->n_type & GMASK) == GCC) hp->h_cc = cat(hp->h_cc, nalloc(np->n_name, np->n_type)); else if ((np->n_type & GMASK) == GBCC) hp->h_bcc = cat(hp->h_bcc, nalloc(np->n_name, np->n_type)); } } /* * Prepend a header in front of the collected stuff * and return the new file. */ FILE * infix(struct header *hp, FILE *fi) { FILE *nfo, *nfi; int c, fd; char tempname[PATHSIZE]; (void)snprintf(tempname, sizeof(tempname), "%s/mail.RsXXXXXXXXXX", tmpdir); if ((fd = mkstemp(tempname)) == -1 || (nfo = Fdopen(fd, "w")) == NULL) { warn("%s", tempname); return (fi); } if ((nfi = Fopen(tempname, "r")) == NULL) { warn("%s", tempname); (void)Fclose(nfo); (void)rm(tempname); return (fi); } (void)rm(tempname); (void)puthead(hp, nfo, GTO|GSUBJECT|GCC|GBCC|GREPLYTO|GINREPLYTO|GNL|GCOMMA); c = getc(fi); while (c != EOF) { (void)putc(c, nfo); c = getc(fi); } if (ferror(fi)) { warnx("read"); rewind(fi); return (fi); } (void)fflush(nfo); if (ferror(nfo)) { warn("%s", tempname); (void)Fclose(nfo); (void)Fclose(nfi); rewind(fi); return (fi); } (void)Fclose(nfo); (void)Fclose(fi); rewind(nfi); return (nfi); } /* * Dump the to, subject, cc header on the * passed file buffer. */ int puthead(struct header *hp, FILE *fo, int w) { int gotcha; gotcha = 0; if (hp->h_to != NULL && w & GTO) fmt("To:", hp->h_to, fo, w&GCOMMA), gotcha++; if (hp->h_subject != NULL && w & GSUBJECT) fprintf(fo, "Subject: %s\n", hp->h_subject), gotcha++; if (hp->h_cc != NULL && w & GCC) fmt("Cc:", hp->h_cc, fo, w&GCOMMA), gotcha++; if (hp->h_bcc != NULL && w & GBCC) fmt("Bcc:", hp->h_bcc, fo, w&GCOMMA), gotcha++; if (hp->h_replyto != NULL && w & GREPLYTO) fprintf(fo, "Reply-To: %s\n", hp->h_replyto), gotcha++; if (hp->h_inreplyto != NULL && w & GINREPLYTO) fprintf(fo, "In-Reply-To: <%s>\n", hp->h_inreplyto), gotcha++; if (gotcha && w & GNL) (void)putc('\n', fo); return (0); } /* * Format the given header line to not exceed 72 characters. */ void fmt(const char *str, struct name *np, FILE *fo, int comma) { int col, len; comma = comma ? 1 : 0; col = strlen(str); if (col) fputs(str, fo); for (; np != NULL; np = np->n_flink) { if (np->n_flink == NULL) comma = 0; len = strlen(np->n_name); col++; /* for the space */ if (col + len + comma > 72 && col > 4) { fprintf(fo, "\n "); col = 4; } else fprintf(fo, " "); fputs(np->n_name, fo); if (comma) fprintf(fo, ","); col += len + comma; } fprintf(fo, "\n"); } /* * Save the outgoing mail on the passed file. 
*/ /*ARGSUSED*/ int savemail(char name[], FILE *fi) { FILE *fo; char buf[BUFSIZ]; int i; time_t now; + mode_t saved_umask; - if ((fo = Fopen(name, "a")) == NULL) { + saved_umask = umask(077); + fo = Fopen(name, "a"); + umask(saved_umask); + + if (fo == NULL) { warn("%s", name); return (-1); } (void)time(&now); fprintf(fo, "From %s %s", myname, ctime(&now)); while ((i = fread(buf, 1, sizeof(buf), fi)) > 0) (void)fwrite(buf, 1, i, fo); fprintf(fo, "\n"); (void)fflush(fo); if (ferror(fo)) warn("%s", name); (void)Fclose(fo); rewind(fi); return (0); } Index: projects/clang400-import/usr.bin/sort/radixsort.c =================================================================== --- projects/clang400-import/usr.bin/sort/radixsort.c (revision 312719) +++ projects/clang400-import/usr.bin/sort/radixsort.c (revision 312720) @@ -1,693 +1,697 @@ /*- * Copyright (C) 2012 Oleg Moskalenko * Copyright (C) 2012 Gabor Kovesdan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #if defined(SORT_THREADS) #include #include #endif #include #include #include #include #include #include "coll.h" #include "radixsort.h" #define DEFAULT_SORT_FUNC_RADIXSORT mergesort #define TINY_NODE(sl) ((sl)->tosort_num < 65) #define SMALL_NODE(sl) ((sl)->tosort_num < 5) /* are we sorting in reverse order ? 
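The radixsort.c hunks below replace a busy-wait (pthread_yield() whenever the level stack is empty) with a condition variable protected by the existing stack mutex: push_ls() signals when work arrives, and sort_left_dec() broadcasts when the last item is retired so idle workers can exit. A self-contained sketch of that pattern, all names hypothetical:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cnd = PTHREAD_COND_INITIALIZER;
static struct node *q_head;
static size_t items_left;	/* work not yet retired */

/* Producer: publish a node and wake one sleeping worker. */
static void
q_push(struct node *n)
{
	pthread_mutex_lock(&q_mtx);
	n->next = q_head;
	q_head = n;
	pthread_cond_signal(&q_cnd);
	pthread_mutex_unlock(&q_mtx);
}

/* Retire finished work; the last decrement wakes all sleepers. */
static void
q_retire(size_t n)
{
	pthread_mutex_lock(&q_mtx);
	items_left -= n;
	if (items_left == 0)
		pthread_cond_broadcast(&q_cnd);
	pthread_mutex_unlock(&q_mtx);
}

/* Worker: sleep until a node appears or all work is retired. */
static struct node *
q_pop(void)
{
	struct node *n = NULL;

	pthread_mutex_lock(&q_mtx);
	while (q_head == NULL && items_left > 0)
		pthread_cond_wait(&q_cnd, &q_mtx);
	if (q_head != NULL) {
		n = q_head;
		q_head = n->next;
	}
	pthread_mutex_unlock(&q_mtx);
	return (n);
}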
*/ static bool reverse_sort; /* sort sub-levels array size */ static const size_t slsz = 256 * sizeof(struct sort_level*); /* one sort level structure */ struct sort_level { struct sort_level **sublevels; struct sort_list_item **leaves; struct sort_list_item **sorted; struct sort_list_item **tosort; size_t leaves_num; size_t leaves_sz; size_t level; size_t real_sln; size_t start_position; size_t sln; size_t tosort_num; size_t tosort_sz; }; /* stack of sort levels ready to be sorted */ struct level_stack { struct level_stack *next; struct sort_level *sl; }; static struct level_stack *g_ls; #if defined(SORT_THREADS) /* stack guarding mutex */ +static pthread_cond_t g_ls_cond; static pthread_mutex_t g_ls_mutex; /* counter: how many items are left */ static size_t sort_left; /* guarding mutex */ -static pthread_mutex_t sort_left_mutex; /* semaphore to count threads */ static sem_t mtsem; /* * Decrement items counter */ static inline void sort_left_dec(size_t n) { - - pthread_mutex_lock(&sort_left_mutex); + pthread_mutex_lock(&g_ls_mutex); sort_left -= n; - pthread_mutex_unlock(&sort_left_mutex); + if (sort_left == 0 && nthreads > 1) + pthread_cond_broadcast(&g_ls_cond); + pthread_mutex_unlock(&g_ls_mutex); } /* * Do we have something to sort ? + * + * This routine does not need to be locked. */ static inline bool have_sort_left(void) { bool ret; - pthread_mutex_lock(&sort_left_mutex); ret = (sort_left > 0); - pthread_mutex_unlock(&sort_left_mutex); + return (ret); } #else #define sort_left_dec(n) #endif /* SORT_THREADS */ /* * Push sort level to the stack */ static inline void push_ls(struct sort_level *sl) { struct level_stack *new_ls; new_ls = sort_malloc(sizeof(struct level_stack)); new_ls->sl = sl; #if defined(SORT_THREADS) if (nthreads > 1) pthread_mutex_lock(&g_ls_mutex); #endif new_ls->next = g_ls; g_ls = new_ls; #if defined(SORT_THREADS) if (nthreads > 1) + pthread_cond_signal(&g_ls_cond); +#endif + +#if defined(SORT_THREADS) + if (nthreads > 1) pthread_mutex_unlock(&g_ls_mutex); #endif } /* * Pop sort level from the stack (single-threaded style) */ static inline struct sort_level* pop_ls_st(void) { struct sort_level *sl; if (g_ls) { struct level_stack *saved_ls; sl = g_ls->sl; saved_ls = g_ls; g_ls = g_ls->next; sort_free(saved_ls); } else sl = NULL; return (sl); } #if defined(SORT_THREADS) /* * Pop sort level from the stack (multi-threaded style) */ static inline struct sort_level* pop_ls_mt(void) { struct level_stack *saved_ls; struct sort_level *sl; pthread_mutex_lock(&g_ls_mutex); - if (g_ls) { - sl = g_ls->sl; - saved_ls = g_ls; - g_ls = g_ls->next; - } else { + for (;;) { + if (g_ls) { + sl = g_ls->sl; + saved_ls = g_ls; + g_ls = g_ls->next; + break; + } sl = NULL; saved_ls = NULL; + + if (have_sort_left() == 0) + break; + pthread_cond_wait(&g_ls_cond, &g_ls_mutex); } pthread_mutex_unlock(&g_ls_mutex); sort_free(saved_ls); return (sl); } #endif /* defined(SORT_THREADS) */ static void add_to_sublevel(struct sort_level *sl, struct sort_list_item *item, size_t indx) { struct sort_level *ssl; ssl = sl->sublevels[indx]; if (ssl == NULL) { ssl = sort_malloc(sizeof(struct sort_level)); memset(ssl, 0, sizeof(struct sort_level)); ssl->level = sl->level + 1; sl->sublevels[indx] = ssl; ++(sl->real_sln); } if (++(ssl->tosort_num) > ssl->tosort_sz) { ssl->tosort_sz = ssl->tosort_num + 128; ssl->tosort = sort_realloc(ssl->tosort, sizeof(struct sort_list_item*) * (ssl->tosort_sz)); } ssl->tosort[ssl->tosort_num - 1] = item; } static inline void add_leaf(struct sort_level *sl, struct 
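Further down in this file, run_top_sort_level() starts its workers detached and then waits for one mtsem post per worker instead of joining them. A simplified sketch of that start/wait shape (names hypothetical; the EAGAIN retry loop of the original is omitted):

#include <err.h>
#include <pthread.h>
#include <semaphore.h>
#include <stddef.h>

static sem_t done_sem;

static void *
worker(void *arg)
{
	/* ... consume work ... */
	sem_post(&done_sem);	/* announce completion; thread is unjoinable */
	return (arg);
}

static void
start_and_wait(size_t nworkers)
{
	pthread_attr_t attr;
	pthread_t tid;
	size_t i;
	int rc;

	if (sem_init(&done_sem, 0, 0) == -1)
		err(2, "sem_init");
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	for (i = 0; i < nworkers; i++)
		if ((rc = pthread_create(&tid, &attr, worker, NULL)) != 0)
			errc(2, rc, "pthread_create");
	pthread_attr_destroy(&attr);
	for (i = 0; i < nworkers; i++)	/* one post per worker */
		sem_wait(&done_sem);
	sem_destroy(&done_sem);
}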
sort_list_item *item) { if (++(sl->leaves_num) > sl->leaves_sz) { sl->leaves_sz = sl->leaves_num + 128; sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item*) * (sl->leaves_sz))); } sl->leaves[sl->leaves_num - 1] = item; } static inline int get_wc_index(struct sort_list_item *sli, size_t level) { const struct key_value *kv; const struct bwstring *bws; kv = get_key_from_keys_array(&sli->ka, 0); bws = kv->k; if ((BWSLEN(bws) > level)) return (unsigned char) BWS_GET(bws,level); return (-1); } static void place_item(struct sort_level *sl, size_t item) { struct sort_list_item *sli; int c; sli = sl->tosort[item]; c = get_wc_index(sli, sl->level); if (c == -1) add_leaf(sl, sli); else add_to_sublevel(sl, sli, c); } static void free_sort_level(struct sort_level *sl) { if (sl) { if (sl->leaves) sort_free(sl->leaves); if (sl->level > 0) sort_free(sl->tosort); if (sl->sublevels) { struct sort_level *slc; size_t sln; sln = sl->sln; for (size_t i = 0; i < sln; ++i) { slc = sl->sublevels[i]; if (slc) free_sort_level(slc); } sort_free(sl->sublevels); } sort_free(sl); } } static void run_sort_level_next(struct sort_level *sl) { struct sort_level *slc; size_t i, sln, tosort_num; if (sl->sublevels) { sort_free(sl->sublevels); sl->sublevels = NULL; } switch (sl->tosort_num) { case 0: goto end; case (1): sl->sorted[sl->start_position] = sl->tosort[0]; sort_left_dec(1); goto end; case (2): if (list_coll_offset(&(sl->tosort[0]), &(sl->tosort[1]), sl->level) > 0) { sl->sorted[sl->start_position++] = sl->tosort[1]; sl->sorted[sl->start_position] = sl->tosort[0]; } else { sl->sorted[sl->start_position++] = sl->tosort[0]; sl->sorted[sl->start_position] = sl->tosort[1]; } sort_left_dec(2); goto end; default: if (TINY_NODE(sl) || (sl->level > 15)) { listcoll_t func; func = get_list_call_func(sl->level); sl->leaves = sl->tosort; sl->leaves_num = sl->tosort_num; sl->leaves_sz = sl->leaves_num; sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item *) * (sl->leaves_sz))); sl->tosort = NULL; sl->tosort_num = 0; sl->tosort_sz = 0; sl->sln = 0; sl->real_sln = 0; if (sort_opts_vals.sflag) { if (mergesort(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) func) == -1) /* NOTREACHED */ err(2, "Radix sort error 3"); } else DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) func); memcpy(sl->sorted + sl->start_position, sl->leaves, sl->leaves_num * sizeof(struct sort_list_item*)); sort_left_dec(sl->leaves_num); goto end; } else { sl->tosort_sz = sl->tosort_num; sl->tosort = sort_realloc(sl->tosort, sizeof(struct sort_list_item*) * (sl->tosort_sz)); } } sl->sln = 256; sl->sublevels = sort_malloc(slsz); memset(sl->sublevels, 0, slsz); sl->real_sln = 0; tosort_num = sl->tosort_num; for (i = 0; i < tosort_num; ++i) place_item(sl, i); sort_free(sl->tosort); sl->tosort = NULL; sl->tosort_num = 0; sl->tosort_sz = 0; if (sl->leaves_num > 1) { if (keys_num > 1) { if (sort_opts_vals.sflag) { mergesort(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) list_coll); } else { DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) list_coll); } } else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) { DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) list_coll_by_str_only); } } sl->leaves_sz = 
sl->leaves_num; sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item *) * (sl->leaves_sz))); if (!reverse_sort) { memcpy(sl->sorted + sl->start_position, sl->leaves, sl->leaves_num * sizeof(struct sort_list_item*)); sl->start_position += sl->leaves_num; sort_left_dec(sl->leaves_num); sort_free(sl->leaves); sl->leaves = NULL; sl->leaves_num = 0; sl->leaves_sz = 0; sln = sl->sln; for (i = 0; i < sln; ++i) { slc = sl->sublevels[i]; if (slc) { slc->sorted = sl->sorted; slc->start_position = sl->start_position; sl->start_position += slc->tosort_num; if (SMALL_NODE(slc)) run_sort_level_next(slc); else push_ls(slc); sl->sublevels[i] = NULL; } } } else { size_t n; sln = sl->sln; for (i = 0; i < sln; ++i) { n = sln - i - 1; slc = sl->sublevels[n]; if (slc) { slc->sorted = sl->sorted; slc->start_position = sl->start_position; sl->start_position += slc->tosort_num; if (SMALL_NODE(slc)) run_sort_level_next(slc); else push_ls(slc); sl->sublevels[n] = NULL; } } memcpy(sl->sorted + sl->start_position, sl->leaves, sl->leaves_num * sizeof(struct sort_list_item*)); sort_left_dec(sl->leaves_num); } end: free_sort_level(sl); } /* * Single-threaded sort cycle */ static void run_sort_cycle_st(void) { struct sort_level *slc; for (;;) { slc = pop_ls_st(); if (slc == NULL) { break; } run_sort_level_next(slc); } } #if defined(SORT_THREADS) /* * Multi-threaded sort cycle */ static void run_sort_cycle_mt(void) { struct sort_level *slc; for (;;) { slc = pop_ls_mt(); - if (slc == NULL) { - if (have_sort_left()) { - pthread_yield(); - continue; - } + if (slc == NULL) break; - } run_sort_level_next(slc); } } /* * Sort cycle thread (in multi-threaded mode) */ static void* sort_thread(void* arg) { - run_sort_cycle_mt(); - sem_post(&mtsem); return (arg); } #endif /* defined(SORT_THREADS) */ static void run_top_sort_level(struct sort_level *sl) { struct sort_level *slc; reverse_sort = sort_opts_vals.kflag ? 
keys[0].sm.rflag : default_sort_mods->rflag; sl->start_position = 0; sl->sln = 256; sl->sublevels = sort_malloc(slsz); memset(sl->sublevels, 0, slsz); for (size_t i = 0; i < sl->tosort_num; ++i) place_item(sl, i); if (sl->leaves_num > 1) { if (keys_num > 1) { if (sort_opts_vals.sflag) { mergesort(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) list_coll); } else { DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) list_coll); } } else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) { DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num, sizeof(struct sort_list_item *), (int(*)(const void *, const void *)) list_coll_by_str_only); } } if (!reverse_sort) { memcpy(sl->tosort + sl->start_position, sl->leaves, sl->leaves_num * sizeof(struct sort_list_item*)); sl->start_position += sl->leaves_num; sort_left_dec(sl->leaves_num); for (size_t i = 0; i < sl->sln; ++i) { slc = sl->sublevels[i]; if (slc) { slc->sorted = sl->tosort; slc->start_position = sl->start_position; sl->start_position += slc->tosort_num; push_ls(slc); sl->sublevels[i] = NULL; } } } else { size_t n; for (size_t i = 0; i < sl->sln; ++i) { n = sl->sln - i - 1; slc = sl->sublevels[n]; if (slc) { slc->sorted = sl->tosort; slc->start_position = sl->start_position; sl->start_position += slc->tosort_num; push_ls(slc); sl->sublevels[n] = NULL; } } memcpy(sl->tosort + sl->start_position, sl->leaves, sl->leaves_num * sizeof(struct sort_list_item*)); sort_left_dec(sl->leaves_num); } #if defined(SORT_THREADS) if (nthreads < 2) { #endif run_sort_cycle_st(); #if defined(SORT_THREADS) } else { size_t i; for(i = 0; i < nthreads; ++i) { pthread_attr_t attr; pthread_t pth; pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, - PTHREAD_DETACHED); + pthread_attr_setdetachstate(&attr, PTHREAD_DETACHED); for (;;) { int res = pthread_create(&pth, &attr, sort_thread, NULL); if (res >= 0) break; if (errno == EAGAIN) { pthread_yield(); continue; } err(2, NULL); } pthread_attr_destroy(&attr); } - for(i = 0; i < nthreads; ++i) + for (i = 0; i < nthreads; ++i) sem_wait(&mtsem); } #endif /* defined(SORT_THREADS) */ } static void run_sort(struct sort_list_item **base, size_t nmemb) { struct sort_level *sl; #if defined(SORT_THREADS) size_t nthreads_save = nthreads; if (nmemb < MT_SORT_THRESHOLD) nthreads = 1; if (nthreads > 1) { pthread_mutexattr_t mattr; pthread_mutexattr_init(&mattr); pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_ADAPTIVE_NP); pthread_mutex_init(&g_ls_mutex, &mattr); - pthread_mutex_init(&sort_left_mutex, &mattr); + pthread_cond_init(&g_ls_cond, NULL); pthread_mutexattr_destroy(&mattr); sem_init(&mtsem, 0, 0); } #endif sl = sort_malloc(sizeof(struct sort_level)); memset(sl, 0, sizeof(struct sort_level)); sl->tosort = base; sl->tosort_num = nmemb; sl->tosort_sz = nmemb; #if defined(SORT_THREADS) sort_left = nmemb; #endif run_top_sort_level(sl); free_sort_level(sl); #if defined(SORT_THREADS) if (nthreads > 1) { sem_destroy(&mtsem); pthread_mutex_destroy(&g_ls_mutex); - pthread_mutex_destroy(&sort_left_mutex); } nthreads = nthreads_save; #endif } void rxsort(struct sort_list_item **base, size_t nmemb) { run_sort(base, nmemb); } Index: projects/clang400-import/usr.bin/truss/riscv64-freebsd.c =================================================================== --- projects/clang400-import/usr.bin/truss/riscv64-freebsd.c (nonexistent) +++ projects/clang400-import/usr.bin/truss/riscv64-freebsd.c 
(revision 312720) @@ -0,0 +1,106 @@ +/*- + * Copyright 2017 Li-Wen Hsu + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* FreeBSD/riscv64-specific system call handling. */ + +#include +#include + +#include +#include + +#include +#include +#include + +#include "truss.h" + +static int +riscv64_fetch_args(struct trussinfo *trussinfo, u_int narg) +{ + struct reg regs; + struct current_syscall *cs; + lwpid_t tid; + u_int i, reg, syscall_num; + + tid = trussinfo->curthread->tid; + cs = &trussinfo->curthread->cs; + if (ptrace(PT_GETREGS, tid, (caddr_t)&regs, 0) < 0) { + fprintf(trussinfo->outfile, "-- CANNOT READ REGISTERS --\n"); + return (-1); + } + + /* + * FreeBSD has two special kinds of system call redirections -- + * SYS_syscall, and SYS___syscall. The former is the old syscall() + * routine, basically; the latter is for quad-aligned arguments. + * + * The system call argument count and code from ptrace() already + * account for these, but we need to skip over the first argument.
+ */ + syscall_num = regs.t[0]; + if (syscall_num == SYS_syscall || syscall_num == SYS___syscall) { + reg = 1; + syscall_num = regs.a[0]; + } else { + reg = 0; + } + + for (i = 0; i < narg && reg < NARGREG; i++, reg++) + cs->args[i] = regs.a[reg]; + return (0); +} + +static int +riscv64_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) +{ + struct reg regs; + lwpid_t tid; + + tid = trussinfo->curthread->tid; + if (ptrace(PT_GETREGS, tid, (caddr_t)&regs, 0) < 0) { + fprintf(trussinfo->outfile, "-- CANNOT READ REGISTERS --\n"); + return (-1); + } + + retval[0] = regs.a[0]; + retval[1] = regs.a[1]; + *errorp = !!(regs.t[0]); + return (0); +} + +static struct procabi riscv64_freebsd = { + "FreeBSD ELF64", + SYSDECODE_ABI_FREEBSD, + riscv64_fetch_args, + riscv64_fetch_retval, + STAILQ_HEAD_INITIALIZER(riscv64_freebsd.extra_syscalls), + { NULL } +}; + +PROCABI(riscv64_freebsd); Property changes on: projects/clang400-import/usr.bin/truss/riscv64-freebsd.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/clang400-import/usr.sbin/ctladm/ctladm.8 =================================================================== --- projects/clang400-import/usr.sbin/ctladm/ctladm.8 (revision 312719) +++ projects/clang400-import/usr.sbin/ctladm/ctladm.8 (revision 312720) @@ -1,1042 +1,1057 @@ .\" .\" Copyright (c) 2003 Silicon Graphics International Corp. .\" Copyright (c) 2015 Alexander Motin .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions, and the following disclaimer, .\" without modification. .\" 2. Redistributions in binary form must reproduce at minimum a disclaimer .\" substantially similar to the "NO WARRANTY" disclaimer below .\" ("Disclaimer") and any redistribution must be conditioned upon .\" including a substantially similar Disclaimer requirement for further .\" binary redistribution. .\" .\" NO WARRANTY .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS .\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT .\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR .\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT .\" HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, .\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING .\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGES. .\" .\" ctladm utility man page.
.\" .\" Author: Ken Merry .\" .\" $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.8#3 $ .\" $FreeBSD$ .\" -.Dd December 21, 2016 +.Dd January 23, 2017 .Dt CTLADM 8 .Os .Sh NAME .Nm ctladm .Nd CAM Target Layer control utility .Sh SYNOPSIS .Nm .Aq Ar command .Op lun .Op generic args .Op command args .Nm .Ic tur .Aq lun .Op general options .Nm .Ic inquiry .Aq lun .Op general options .Nm .Ic reqsense .Aq lun .Op general options .Nm .Ic reportluns .Aq lun .Op general options .Nm .Ic read .Aq lun .Op general options .Aq Fl l Ar lba .Aq Fl d Ar datalen .Aq Fl f Ar file|- .Aq Fl b Ar blocksize_bytes .Op Fl c Ar cdbsize .Op Fl N .Nm .Ic write .Aq lun .Op general options .Aq Fl l Ar lba .Aq Fl d Ar datalen .Aq Fl f Ar file|- .Aq Fl b Ar blocksize_bytes .Op Fl c Ar cdbsize .Op Fl N .Nm .Ic readcap .Aq lun .Op general options .Op Fl c Ar cdbsize .Nm .Ic modesense .Aq lun .Aq Fl m Ar page | Fl l .Op Fl P Ar pc .Op Fl d .Op Fl S Ar subpage .Op Fl c Ar size .Nm .Ic start .Aq lun .Op general options .Op Fl i .Op Fl o .Nm .Ic stop .Aq lun .Op general options .Op Fl i .Op Fl o .Nm .Ic synccache .Aq lun .Op general options .Op Fl l Ar lba .Op Fl b Ar blockcount .Op Fl r .Op Fl i .Op Fl c Ar cdbsize .Nm .Ic lunlist .Nm .Ic delay .Aq lun .Aq Fl l Ar datamove|done .Aq Fl t Ar secs .Op Fl T Ar oneshot|cont .Nm .Ic inject .Aq Fl i Ar action .Aq Fl p Ar pattern .Op Fl r Ar lba,len .Op Fl s Ar len fmt Op Ar args .Op Fl c .Op Fl d Ar delete_id .Nm .Ic create .Aq Fl b Ar backend .Op Fl B Ar blocksize .Op Fl d Ar device_id .Op Fl l Ar lun_id .Op Fl o Ar name=value .Op Fl s Ar size_bytes .Op Fl S Ar serial_num .Op Fl t Ar device_type .Nm .Ic remove .Aq Fl b Ar backend .Aq Fl l Ar lun_id .Op Fl o Ar name=value .Nm .Ic modify .Aq Fl b Ar backend .Aq Fl l Ar lun_id .Op Fl o Ar name=value .Aq Fl s Ar size_bytes .Nm .Ic devlist .Op Fl b Ar backend .Op Fl v .Op Fl x .Nm .Ic port .Op Fl o Ar on|off .Op Fl w Ar wwpn .Op Fl W Ar wwnn .Op Fl p Ar targ_port .Op Fl t Ar fe_type .Nm .Ic portlist .Op Fl f Ar frontend .Op Fl i .Op Fl l .Op Fl p Ar targ_port .Op Fl q .Op Fl v .Op Fl x .Nm .Ic lunmap .Aq Fl p Ar targ_port .Op Fl l Ar pLUN .Op Fl L Ar cLUN .Nm .Ic dumpooa .Nm .Ic dumpstructs .Nm .Ic islist .Op Fl v .Op Fl x .Nm .Ic islogout .Aq Fl a | Fl c Ar connection-id | Fl i Ar name | Fl p Ar portal .Nm .Ic isterminate .Aq Fl a | Fl c Ar connection-id | Fl i Ar name | Fl p Ar portal .Nm .Ic help .Sh DESCRIPTION The .Nm utility is designed to provide a way to access and control the CAM Target Layer (CTL). It provides a way to send .Tn SCSI commands to the CTL layer, and also provides some meta-commands that utilize .Tn SCSI commands. (For instance, the .Ic lunlist command is implemented using the .Tn SCSI REPORT LUNS and INQUIRY commands.) .Pp The .Nm utility has a number of primary functions, many of which require a device identifier. The device identifier takes the following form: .Bl -tag -width 14n .It lun Specify the LUN number to operate on. .El Many of the primary functions of the .Nm utility take the following optional arguments: .Bl -tag -width 10n .It Fl C Ar retries Specify the number of times to retry a command in the event of failure. .It Fl D Ar device Specify the device to open. This allows opening a device other than the default device, .Pa /dev/cam/ctl , to be opened for sending commands. .It Fl I Ar id Specify the initiator number to use. By default, .Nm will use 7 as the initiator number. 
.El .Pp Primary commands: .Bl -tag -width 11n .It Ic tur Send the .Tn SCSI TEST UNIT READY command to the device and report whether or not it is ready. .It Ic inquiry Send the .Tn SCSI INQUIRY command to the device and display some of the returned inquiry data. .It Ic reqsense Send the .Tn SCSI REQUEST SENSE command to the device and display the returned sense information. .It Ic reportluns Send the .Tn SCSI REPORT LUNS command to the device and display supported LUNs. .It Ic read Send a .Tn SCSI READ command to the device, and write the requested data to a file or stdout. .Bl -tag -width 12n .It Fl l Ar lba Specify the starting Logical Block Address for the READ. This can be specified in decimal, octal (starting with 0), hexadecimal (starting with 0x) or any other base supported by .Xr strtoull 3 . .It Fl d Ar datalen Specify the length, in 512 byte blocks, of the READ request. .It Fl f Ar file Specify the destination for the data read by the READ command. Either a filename or .Sq - for stdout may be specified. .It Fl c Ar cdbsize Specify the minimum .Tn SCSI CDB (Command Data Block) size to be used for the READ request. Allowable values are 6, 10, 12 and 16. Depending upon the LBA and amount of data requested, a larger CDB size may be used to satisfy the request. (e.g., for LBAs above 0xffffffff, READ(16) must be used to satisfy the request.) .It Fl b Ar blocksize Specify the blocksize of the underlying .Tn SCSI device, so the transfer length can be calculated accurately. The blocksize can be obtained via the .Tn SCSI READ CAPACITY command. .It Fl N Do not copy data to .Nm from the kernel when doing a read, just execute the command without copying data. This is to be used for performance testing. .El .It Ic write Read data from a file or stdin, and write the data to the device using the .Tn SCSI WRITE command. .Bl -tag -width 12n .It Fl l Ar lba Specify the starting Logical Block Address for the WRITE. This can be specified in decimal, octal (starting with 0), hexadecimal (starting with 0x) or any other base supported by .Xr strtoull 3 . .It Fl d Ar datalen Specify the length, in 512 byte blocks, of the WRITE request. .It Fl f Ar file Specify the source for the data to be written by the WRITE command. Either a filename or .Sq - for stdin may be specified. .It Fl c Ar cdbsize Specify the minimum .Tn SCSI CDB (Command Data Block) size to be used for the WRITE request. Allowable values are 6, 10, 12 and 16. Depending upon the LBA and amount of data requested, a larger CDB size may be used to satisfy the request. (e.g., for LBAs above 0xffffffff, WRITE(16) must be used to satisfy the request.) .It Fl b Ar blocksize Specify the blocksize of the underlying .Tn SCSI device, so the transfer length can be calculated accurately. The blocksize can be obtained via the .Tn SCSI READ CAPACITY command. .It Fl N Do not copy data from .Nm to the kernel when doing a write, just execute the command without copying data. This is to be used for performance testing. .El .It Ic readcap Send the .Tn SCSI READ CAPACITY command to the device and display the device size and device block size. By default, READ CAPACITY(10) is used. If the device returns a maximum LBA of 0xffffffff, however, .Nm will automatically issue a READ CAPACITY(16), which is implemented as a service action of the SERVICE ACTION IN(16) opcode. The user can specify the minimum CDB size with the .Fl c argument. Valid values for the .Fl c option are 10 and 16.
If a 10 byte CDB is specified, the request will be automatically reissued with a 16 byte CDB if the maximum LBA returned is 0xffffffff. .It Ic modesense Send a .Tn SCSI MODE SENSE command to the device, and display the requested mode page(s) or page list. .Bl -tag -width 10n .It Fl m Ar page Specify the mode page to display. This option and the .Fl l option are mutually exclusive. One of the two must be specified, though. Mode page numbers may be specified in decimal or hexadecimal. .It Fl l Request that the list of mode pages supported by the device be returned. This option and the .Fl m option are mutually exclusive. One of the two must be specified, though. .It Fl P Ar pc Specify the mode page control value. Possible values are: .Bl -tag -width 2n -compact .It 0 Current values. .It 1 Changeable value bitmask. .It 2 Default values. .It 3 Saved values. .El .It Fl d Disable block descriptors when sending the mode sense request. .It Fl S Ar subpage Specify the subpage used with the mode sense request. .It Fl c Ar cdbsize Specify the CDB size used for the mode sense request. Supported values are 6 and 10. .El .It Ic start Send the .Tn SCSI START STOP UNIT command to the specified LUN with the start bit set. .Bl -tag -width 4n .It Fl i Set the immediate bit in the CDB. Note that CTL does not support the immediate bit, so this is primarily useful for making sure that CTL returns the proper error. .El .It Ic stop Send the .Tn SCSI START STOP UNIT command to the specified LUN with the start bit cleared. We use an ordered tag to stop the LUN, so we can guarantee that all pending I/O executes before it is stopped. (CTL guarantees this anyway, but .Nm sends an ordered tag for completeness.) .Bl -tag -width 4n .It Fl i Set the immediate bit in the CDB. Note that CTL does not support the immediate bit, so this is primarily useful for making sure that CTL returns the proper error. .El .It Ic synccache Send the .Tn SCSI SYNCHRONIZE CACHE command to the device. By default, SYNCHRONIZE CACHE(10) is used. If the specified starting LBA is greater than 0xffffffff or the length is greater than 0xffff, though, SYNCHRONIZE CACHE(16) will be used. The 16 byte command will also be used if the user specifies a 16 byte CDB with the .Fl c argument. .Bl -tag -width 14n .It Fl l Ar lba Specify the starting LBA of the cache region to synchronize. This option is a no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the cache for the entire LUN. .It Fl b Ar blockcount Specify the length of the cache region to synchronize. This option is a no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the cache for the entire LUN. .It Fl r Specify relative addressing for the starting LBA. CTL does not support relative addressing, since it only works for linked commands, and CTL does not support linked commands. .It Fl i Tell the target to return status immediately after issuing the SYNCHRONIZE CACHE command rather than waiting for the cache to finish syncing. CTL does not support this bit. .It Fl c Ar cdbsize Specify the minimum CDB size. Valid values are 10 and 16 bytes. .El .It Ic lunlist List all LUNs registered with CTL. Because this command uses the ioctl port, it will only work when the FETDs (Front End Target Drivers) are enabled. This command is the equivalent of doing a REPORT LUNS on one LUN and then an INQUIRY on each LUN in the system. .It Ic delay Delay commands at the given location. 
There are two places where commands may be delayed currently: before data is transferred .Pq Dq datamove and just prior to sending status to the host .Pq Dq done . One of the two must be supplied as an argument to the .Fl l option. The .Fl t option must also be specified. .Bl -tag -width 12n .It Fl l Ar delayloc Delay command(s) at the specified location. This can either be at the data movement stage (datamove) or prior to command completion (done). .It Fl t Ar delaytime Delay command(s) for the specified number of seconds. This must be specified. If set to 0, it will clear out any previously set delay for this particular location (datamove or done). .It Fl T Ar delaytype Specify the delay type. By default, the .Ic delay option will delay the next command sent to the given LUN. With the .Fl T Ar cont option, every command will be delayed by the specified period of time. With the .Fl T Ar oneshot option, the next command sent to the given LUN will be delayed and all subsequent commands will be completed normally. This is the default. .El .It Ic inject Inject the specified type of error for the LUN specified, when a command that matches the given pattern is seen. The sense data returned is in either fixed or descriptor format, depending upon the status of the D_SENSE bit in the control mode page (page 0xa) for the LUN. .Pp Errors are only injected for commands that have not already failed for other reasons. By default, only the first command matching the pattern specified is returned with the supplied error. .Pp If the .Fl c flag is specified, all commands matching the pattern will be returned with the specified error until the error injection command is deleted with the .Fl d flag. .Bl -tag -width 17n .It Fl i Ar action Specify the error to return: .Bl -tag -width 10n .It aborted Return the next matching command on the specified LUN with the sense key ABORTED COMMAND (0x0b), and the ASC/ASCQ 0x45,0x00 ("Select or reselect failure"). .It mediumerr Return the next matching command on the specified LUN with the sense key MEDIUM ERROR (0x03) and the ASC/ASCQ 0x11,0x00 ("Unrecovered read error") for reads, or ASC/ASCQ 0x0c,0x02 ("Write error - auto reallocation failed") for write errors. .It ua Return the next matching command on the specified LUN with the sense key UNIT ATTENTION (0x06) and the ASC/ASCQ 0x29,0x00 ("POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"). .It custom Return the next matching command on the specified LUN with the supplied sense data. The .Fl s argument must be specified. .El .It Fl p Ar pattern Specify which commands should be returned with the given error. .Bl -tag -width 10n .It read The error should apply to READ(6), READ(10), READ(12), READ(16), etc. .It write The error should apply to WRITE(6), WRITE(10), WRITE(12), WRITE(16), WRITE AND VERIFY(10), etc. .It rw The error should apply to both read and write type commands. .It readcap The error should apply to READ CAPACITY(10) and READ CAPACITY(16) commands. .It tur The error should apply to TEST UNIT READY commands. .It any The error should apply to any command. .El .It Fl r Ar lba,len Specify the starting lba and length of the range of LBAs which should trigger an error. This option only applies when read and/or write patterns are specified. If used with other command types, the error will never be triggered. .It Fl s Ar len fmt Op Ar args Specify the sense data that is to be returned for custom actions. If the format is .Sq - , len bytes of sense data will be read from standard input and written to the sense buffer.
If len is longer than 252 bytes (the maximum allowable .Tn SCSI sense data length), it will be truncated to that length. The sense data format is described in .Xr cam_cdbparse 3 . .It Fl c The error injection should be persistent, instead of happening once. Persistent errors must be deleted with the .Fl d argument. .It Fl d Ar delete_id Delete the specified error injection serial number. The serial number is returned when the error is injected. .El .It Ic port Perform one of several CTL frontend port operations. Either get a list of frontend ports .Pq Fl l , turn one or more frontends on or off .Pq Fl o Ar on|off , or set the World Wide Node Name .Pq Fl w Ar wwnn or World Wide Port Name .Pq Fl W Ar wwpn for a given port. One of .Fl l , .Fl o , or .Fl w or .Fl W must be specified. The WWNN and WWPN may both be specified at the same time, but cannot be combined with enabling/disabling or listing ports. .Bl -tag -width 12n .It Fl o Ar on|off Turn the specified CTL frontend ports off or on. If no port number or port type is specified, all ports are turned on or off. .It Fl p Ar targ_port Specify the frontend port number. The port numbers can be found in the frontend port list. .It Fl t Ar fe_type Specify the frontend type. Currently defined port types are .Dq fc (Fibre Channel), .Dq scsi (Parallel SCSI), .Dq ioctl (CTL ioctl interface), and .Dq internal (CTL CAM SIM). .It Fl w Ar wwnn Set the World Wide Node Name for the given port. The .Fl p argument must be specified, since this is only possible to implement on a single port. As a general rule, the WWNN should be the same across all ports on the system. .It Fl W Ar wwpn Set the World Wide Port Name for the given port. The .Fl p argument must be specified, since this is only possible to implement on a single port. As a general rule, the WWPN must be different for every port in the system. .El .It Ic portlist List CTL frontend ports. .Bl -tag -width 12n .It Fl f Ar frontend Specify the frontend type. .It Fl i Report target and connected initiator addresses. .It Fl l Report LUN mapping. .It Fl p Ar targ_port Specify the frontend port number. .It Fl q Omit the header in the port list output. .It Fl v Enable verbose output (report all port options). .It Fl x Output the port list in XML format. .El .It Ic lunmap Change LUN mapping for the specified port. If both .Ar pLUN and .Ar cLUN are specified -- LUN will be mapped. If .Ar pLUN is specified, but .Ar cLUN is not -- LUN will be unmapped. If neither .Ar pLUN nor .Ar cLUN are specified -- LUN mapping will be disabled, exposing all CTL LUNs. .Bl -tag -width 12n .It Fl p Ar targ_port Specify the frontend port number. .It Fl l Ar pLUN LUN number visible on the specified port. .It Fl L Ar cLUN CTL LUN number. .El .It Ic dumpooa Dump the OOA (Order Of Arrival) queue for each LUN registered with CTL. .It Ic dumpstructs Dump the CTL structures to the console. .It Ic create Create a new LUN. The backend must be specified, and depending upon the backend requested, some of the other options may be required. If the LUN is created successfully, the LUN configuration will be displayed. If LUN creation fails, a message will be displayed describing the failure. .Bl -tag -width 14n .It Fl b Ar backend The .Fl b flag is required. This specifies the name of the backend to use when creating the LUN. Examples are .Dq ramdisk and .Dq block . .It Fl B Ar blocksize Specify the blocksize of the backend in bytes. .It Fl d Ar device_id Specify the LUN-associated string to use in the .Tn SCSI INQUIRY VPD page 0x83 data.
.It Fl l Ar lun_id Request that a particular LUN number be assigned. If the requested LUN number is not available, the request will fail. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .It Fl s Ar size_bytes Specify the size of the LUN in bytes. Some backends may allow setting the size (e.g. the ramdisk backend) and for others the size may be implicit (e.g. the block backend). .It Fl S Ar serial_num Specify the serial number to be used in the .Tn SCSI INQUIRY VPD page 0x80 data. .It Fl t Ar device_type Specify the numeric SCSI device type to use when creating the LUN. If this flag is not used, the type of LUN created is backend-specific. Not all LUN types are supported. Currently CTL supports Direct Access (type 0), Processor (type 3) and CD/DVD (type 5) LUNs. The backend requested may or may not support all of the LUN types that CTL supports. .El .It Ic remove Remove a LUN. The backend must be specified, and the LUN number must also be specified. Backend-specific options may also be specified with the .Fl o flag. .Bl -tag -width 14n .It Fl b Ar backend Specify the backend that owns the LUN to be removed. Examples are .Dq ramdisk and .Dq block . .It Fl l Ar lun_id Specify the LUN number to remove. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .El .It Ic modify Modify a LUN size. The backend, the LUN number, and the size must be specified. .Bl -tag -width 14n .It Fl b Ar backend Specify the backend that owns the LUN to be modified. Examples are .Dq ramdisk and .Dq block . .It Fl l Ar lun_id Specify the LUN number to modify. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .It Fl s Ar size_bytes Specify the size of the LUN in bytes. For the .Dq block backend, an .Dq auto keyword may be passed instead; this will make CTL use the size of the backing file or device. .El .It Ic devlist Get a list of all configured LUNs. This also includes the LUN size and blocksize, serial number and device ID. .Bl -tag -width 11n .It Fl b Ar backend Specify the backend. This restricts the LUN list to the named backend. Examples are .Dq ramdisk and .Dq block . .It Fl v Be verbose. This will also display any backend-specific LUN attributes in addition to the standard per-LUN information. .It Fl x Dump the raw XML. The LUN list information from the kernel comes in XML format, and this option allows the display of the raw XML data. This option and the .Fl v and .Fl b options are mutually exclusive. If you specify .Fl x , the entire LUN database is displayed in XML format. .El .It Ic islist Get a list of currently running iSCSI sessions. This includes initiator and target names and the unique connection IDs. .Bl -tag -width 11n .It Fl v Verbose mode. .It Fl x Dump the raw XML. The sessions list information from the kernel comes in XML format, and this option allows the display of the raw XML data. .El .It Ic islogout Ask the initiator to log out iSCSI sessions matching criteria. .Bl -tag -width 11n .It Fl a Log out all sessions. .It Fl c Specify connection ID. .It Fl i Specify initiator name. .It Fl p Specify initiator portal (hostname or IP address). .El .It Ic isterminate Forcibly terminate iSCSI sessions matching criteria.
.Bl -tag -width 11n .It Fl a Terminate all sessions. .It Fl c Specify connection ID. .It Fl i Specify initiator name. .It Fl p Specify initiator portal (hostname or IP address). .El .It Ic help Display .Nm usage information. .El .Sh OPTIONS A number of additional configuration options may be specified for LUNs. Some options are global, others are backend-specific. .Pp Global options: .Bl -tag -width 12n .It Va vendor Specifies LUN vendor string up to 8 chars. .It Va product Specifies LUN product string up to 16 chars. .It Va revision Specifies LUN revision string up to 4 chars. .It Va scsiname Specifies LUN SCSI name string. .It Va eui Specifies LUN EUI-64 identifier. .It Va naa Specifies LUN NAA identifier. .It Va uuid Specifies LUN locally assigned RFC 4122 UUID identifier. The EUI, NAA or UUID identifier should be set to a unique value to allow the EXTENDED COPY command to access the LUN. Non-unique LUN identifiers may lead to data corruption. Some initiators may not support later introduced UUID identifiers. .It Va ha_role Setting to "primary" or "secondary" overrides the default role of the node in the HA cluster, set by the kern.cam.ctl.ha_role sysctl. .It Va insecure_tpc Setting to "on" allows an EXTENDED COPY command sent to this LUN to access other LUNs on this host that are not otherwise accessible. This allows copying between different iSCSI targets residing on the same host to be offloaded in trusted environments. .It Va readcache Set to "off" to disable read caching for the LUN, if supported by the backend. .It Va readonly Set to "on" to block all media write operations to the LUN, reporting it as write protected. .It Va removable Set to "on" to make the LUN removable. .It Va reordering Set to "unrestricted" to allow the target to process commands with the SIMPLE task attribute in arbitrary order. Any data integrity exposures related to command sequence order shall be explicitly handled by the application client through the selection of appropriate commands and task attributes. The default value is "restricted". It improves data integrity, but may introduce some additional delays. .It Va serseq Set to "on" to serialize consecutive reads/writes. Set to "read" to serialize consecutive reads. Set to "off" to allow them to be issued in parallel. Parallel issue of consecutive operations may confuse the logic of the backing file system, hurting performance; but it may improve performance of backing stores without prefetch/write-back. .It Va pblocksize .It Va pblockoffset Specify physical block size and offset of the device. .It Va ublocksize .It Va ublockoffset Specify UNMAP block size and offset of the device. .It Va rpm Specifies medium rotation rate of the device: 0 -- not reported, 1 -- non-rotating (SSD), >1024 -- value in revolutions per minute. .It Va formfactor Specifies nominal form factor of the device: 0 -- not reported, 1 -- 5.25", 2 -- 3.5", 3 -- 2.5", 4 -- 1.8", 5 -- less than 1.8". .It Va provisioning_type When UNMAP support is enabled, this option specifies provisioning type: "resource", "thin" or "unknown". Default value is "thin". Logical units without UNMAP support are reported as fully provisioned. .It Va unmap Setting to "on" or "off" controls UNMAP support for the logical unit. Default value is "on" if supported by the backend. .It Va unmap_max_lba .It Va unmap_max_descr Specify maximum allowed number of LBAs and block descriptors per UNMAP command to report in Block Limits VPD page. .It Va write_same_max_lba Specify maximum allowed number of LBAs per WRITE SAME command to report in Block Limits VPD page.
.It Va avail-threshold .It Va used-threshold .It Va pool-avail-threshold .It Va pool-used-threshold Set per-LUN/-pool thin provisioning soft thresholds. The LUN will establish a UNIT ATTENTION condition if its or the pool's available space gets below the configured avail values, or its or the pool's used space gets above the configured used values. Pool thresholds only work for ZVOL-backed LUNs. .It Va writecache Set to "off" to disable write caching for the LUN, if supported by the backend. .El .Pp Options specific for block backend: .Bl -tag -width 12n .It Va file Specifies file or device name to use for backing store. .It Va num_threads Specifies number of backend threads to use for this LUN. .El +.Pp +Options specific for ramdisk backend: +.Bl -tag -width 12n +.It Va capacity +Specifies capacity of backing store (maximum RAM for data). +The default value is zero, which disables the backing store completely, +making all writes go nowhere, while all reads return zeroes. +.El .Sh EXAMPLES .Dl ctladm tur 1 .Pp Send a .Tn SCSI TEST UNIT READY command to LUN 1. .Pp .Dl ctladm modesense 1 -l .Pp Display the list of mode pages supported by LUN 1. .Pp .Dl ctladm modesense 0 -m 10 -P 3 -d -c 10 .Pp Display the saved version of the Control mode page (page 10) on LUN 0. Disable fetching block descriptors, and use a 10 byte MODE SENSE command instead of the default 6 byte command. .Bd -literal ctladm read 2 -l 0 -d 1 -b 512 -f - > foo .Ed .Pp Read the first 512 byte block from LUN 2 and dump it to the file .Pa foo . .Bd -literal ctladm write 3 -l 0xff432140 -d 20 -b 512 -f /tmp/bar .Ed .Pp Read 10240 bytes from the file .Pa /tmp/bar and write it to LUN 3, starting at LBA 0xff432140. .Pp .Dl ctladm create -b ramdisk -s 10485760000000000 .Pp Create a LUN with the .Dq fake ramdisk as a backing store. -The LUN will claim to have a size of approximately 10 terabytes. +The LUN will claim to have a size of approximately 10 terabytes, +while having no real data store (all written data are lost). +.Pp +.Dl ctladm create -b ramdisk -s 10T -o capacity=10G +.Pp +Create a thin provisioned LUN with a ramdisk as a backing store. +The LUN will have a maximum backing store capacity of 10 gigabytes, +while reporting a size of 10 terabytes. .Pp .Dl ctladm create -b block -o file=src/usr.sbin/ctladm/ctladm.8 .Pp Create a LUN using the block backend, and specify the file .Pa src/usr.sbin/ctladm/ctladm.8 as the backing store. The size of the LUN will be derived from the size of the file. .Pp .Dl ctladm create -b block -o file=src/usr.sbin/ctladm/ctladm.8 -S MYSERIAL321 -d MYDEVID123 .Pp Create a LUN using the block backend, specify the file .Pa src/usr.sbin/ctladm/ctladm.8 as the backing store, and specify the .Tn SCSI VPD page 0x80 and 0x83 serial number .Fl ( S ) and device ID .Fl ( d ) . .Pp .Dl ctladm remove -b block -l 12 .Pp Remove LUN 12, which is handled by the block backend, from the system. .Pp .Dl ctladm devlist .Pp List configured LUNs in the system, along with their backend and serial number. This works when the Front End Target Drivers are enabled or disabled. .Pp .Dl ctladm lunlist .Pp List all LUNs in the system, along with their inquiry data and device type. This only works when the FETDs are enabled, since the commands go through the ioctl port. .Pp .Dl ctladm inject 6 -i mediumerr -p read -r 0,512 -c .Pp Inject a medium error on LUN 6 for every read that covers the first 512 blocks of the LUN.
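A hedged sketch, not from the original page: the .Ic delay command documented above has no entry in this EXAMPLES section; one plausible example, using only the flag syntax described earlier and a hypothetical LUN number, would be: .Pp .Dl ctladm delay 6 -l done -t 5 .Pp Delay the completion status of the next command on LUN 6 by five seconds; running the same command with -t 0 would clear the delay.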
.Bd -literal -offset indent ctladm inject 6 -i custom -p tur -s 18 "f0 0 02 s12 04 02" .Ed .Pp Inject a custom error on LUN 6 for the next TEST UNIT READY command only. This will result in a sense key of NOT READY (0x02), and an ASC/ASCQ of 0x04,0x02 ("Logical unit not ready, initializing command required"). .Sh SEE ALSO .Xr cam 3 , .Xr cam_cdbparse 3 , .Xr cam 4 , .Xr ctl 4 , .Xr xpt 4 , .Xr camcontrol 8 , .Xr ctld 8 , .Xr ctlstat 8 .Sh HISTORY The .Nm utility was originally written during the Winter/Spring of 2003 as an interface to CTL. .Sh AUTHORS .An Ken Merry Aq Mt ken@FreeBSD.org Index: projects/clang400-import/usr.sbin/pw/psdate.c =================================================================== --- projects/clang400-import/usr.sbin/pw/psdate.c (revision 312719) +++ projects/clang400-import/usr.sbin/pw/psdate.c (revision 312720) @@ -1,261 +1,257 @@ /*- * Copyright (C) 1996 * David L. Nugent. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY DAVID L. NUGENT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL DAVID L. NUGENT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
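In the psdate.c hunk below, numerics() shrinks from a hand-rolled scanning loop to a single strspn() test. The idiom: strspn() returns the length of the longest leading run of characters drawn from the accept set, so indexing the string at that length lands on the terminating NUL exactly when every character was accepted. A tiny sketch (helper name hypothetical):

#include <stdbool.h>
#include <string.h>

/* True when every character of s is drawn from accept. */
static bool
all_in_set(const char *s, const char *accept)
{
	return (s[strspn(s, accept)] == '\0');
}

/* all_in_set("0x123", "0123456789x") -> true  (timestamp forms) */
/* all_in_set("12-04", "0123456789x") -> false (date form)       */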
*/ #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ #include #include #include #include #include #include "psdate.h" static int numerics(char const * str) { - int rc = isdigit((unsigned char)*str); - if (rc) - while (isdigit((unsigned char)*str) || *str == 'x') - ++str; - return rc && !*str; + return (str[strspn(str, "0123456789x")] == '\0'); } static int aindex(char const * arr[], char const ** str, int len) { int l, i; char mystr[32]; mystr[len] = '\0'; l = strlen(strncpy(mystr, *str, len)); for (i = 0; i < l; i++) mystr[i] = (char) tolower((unsigned char)mystr[i]); for (i = 0; arr[i] && strcmp(mystr, arr[i]) != 0; i++); if (arr[i] == NULL) i = -1; else { /* Skip past it */ while (**str && isalpha((unsigned char)**str)) ++(*str); /* And any following whitespace */ while (**str && (**str == ',' || isspace((unsigned char)**str))) ++(*str); } /* Return index */ return i; } static int weekday(char const ** str) { static char const *days[] = {"sun", "mon", "tue", "wed", "thu", "fri", "sat", NULL}; return aindex(days, str, 3); } static void parse_datesub(char const * str, struct tm *t) { struct tm tm; locale_t l; int i; char *ret; const char *valid_formats[] = { "%d-%b-%y", "%d-%b-%Y", "%d-%m-%y", "%d-%m-%Y", "%H:%M %d-%b-%y", "%H:%M %d-%b-%Y", "%H:%M %d-%m-%y", "%H:%M %d-%m-%Y", "%H:%M:%S %d-%b-%y", "%H:%M:%S %d-%b-%Y", "%H:%M:%S %d-%m-%y", "%H:%M:%S %d-%m-%Y", "%d-%b-%y %H:%M", "%d-%b-%Y %H:%M", "%d-%m-%y %H:%M", "%d-%m-%Y %H:%M", "%d-%b-%y %H:%M:%S", "%d-%b-%Y %H:%M:%S", "%d-%m-%y %H:%M:%S", "%d-%m-%Y %H:%M:%S", "%H:%M\t%d-%b-%y", "%H:%M\t%d-%b-%Y", "%H:%M\t%d-%m-%y", "%H:%M\t%d-%m-%Y", "%H:%M\t%S %d-%b-%y", "%H:%M\t%S %d-%b-%Y", "%H:%M\t%S %d-%m-%y", "%H:%M\t%S %d-%m-%Y", "%d-%b-%y\t%H:%M", "%d-%b-%Y\t%H:%M", "%d-%m-%y\t%H:%M", "%d-%m-%Y\t%H:%M", "%d-%b-%y\t%H:%M:%S", "%d-%b-%Y\t%H:%M:%S", "%d-%m-%y\t%H:%M:%S", "%d-%m-%Y\t%H:%M:%S", NULL, }; l = newlocale(LC_ALL_MASK, "C", NULL); memset(&tm, 0, sizeof(tm)); for (i=0; valid_formats[i] != NULL; i++) { ret = strptime_l(str, valid_formats[i], &tm, l); if (ret && *ret == '\0') { t->tm_mday = tm.tm_mday; t->tm_mon = tm.tm_mon; t->tm_year = tm.tm_year; t->tm_hour = tm.tm_hour; t->tm_min = tm.tm_min; t->tm_sec = tm.tm_sec; freelocale(l); return; } } freelocale(l); errx(EXIT_FAILURE, "Invalid date"); } /*- * Parse time must be flexible, it handles the following formats: * nnnnnnnnnnn UNIX timestamp (all numeric), 0 = now * 0xnnnnnnnn UNIX timestamp in hexadecimal * 0nnnnnnnnn UNIX timestamp in octal * 0 Given time * +nnnn[smhdwoy] Given time + nnnn hours, mins, days, weeks, months or years * -nnnn[smhdwoy] Given time - nnnn hours, mins, days, weeks, months or years * dd[ ./-]mmm[ ./-]yy Date } * hh:mm:ss Time } May be combined */ time_t parse_date(time_t dt, char const * str) { char *p; int i; long val; struct tm *T; if (dt == 0) dt = time(NULL); while (*str && isspace((unsigned char)*str)) ++str; if (numerics(str)) { dt = strtol(str, &p, 0); } else if (*str == '+' || *str == '-') { val = strtol(str, &p, 0); switch (*p) { case 'h': case 'H': /* hours */ dt += (val * 3600L); break; case '\0': case 'm': case 'M': /* minutes */ dt += (val * 60L); break; case 's': case 'S': /* seconds */ dt += val; break; case 'd': case 'D': /* days */ dt += (val * 86400L); break; case 'w': case 'W': /* weeks */ dt += (val * 604800L); break; case 'o': case 'O': /* months */ T = localtime(&dt); T->tm_mon += (int) val; i = T->tm_mday; goto fixday; case 'y': case 'Y': /* years */ T = localtime(&dt); T->tm_year += (int) val; i = T->tm_mday; 
fixday: dt = mktime(T); T = localtime(&dt); if (T->tm_mday != i) { T->tm_mday = 1; dt = mktime(T); dt -= (time_t) 86400L; } default: /* unknown */ break; /* leave untouched */ } } else { char *q, tmp[64]; /* * Skip past any weekday prefix */ weekday(&str); strlcpy(tmp, str, sizeof(tmp)); str = tmp; T = localtime(&dt); /* * See if we can break off any timezone */ while ((q = strrchr(tmp, ' ')) != NULL) { if (strchr("(+-", q[1]) != NULL) *q = '\0'; else { int j = 1; while (q[j] && isupper((unsigned char)q[j])) ++j; if (q[j] == '\0') *q = '\0'; else break; } } parse_datesub(tmp, T); dt = mktime(T); } return dt; } Index: projects/clang400-import/usr.sbin/pw/pw_user.c =================================================================== --- projects/clang400-import/usr.sbin/pw/pw_user.c (revision 312719) +++ projects/clang400-import/usr.sbin/pw/pw_user.c (revision 312720) @@ -1,1813 +1,1811 @@ /*- * Copyright (C) 1996 * David L. Nugent. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY DAVID L. NUGENT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL DAVID L. NUGENT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pw.h" #include "bitmap.h" #include "psdate.h" #define LOGNAMESIZE (MAXLOGNAME-1) static char locked_str[] = "*LOCKED*"; static struct passwd fakeuser = { "nouser", "*", -1, -1, 0, "", "User &", "/nonexistent", "/bin/sh", 0, 0 }; static int print_user(struct passwd *pwd, bool pretty, bool v7); static uid_t pw_uidpolicy(struct userconf *cnf, intmax_t id); static uid_t pw_gidpolicy(struct userconf *cnf, char *grname, char *nam, gid_t prefer, bool dryrun); static char *pw_homepolicy(struct userconf * cnf, char *homedir, const char *user); static char *pw_shellpolicy(struct userconf * cnf); static char *pw_password(struct userconf * cnf, char const * user, bool dryrun); static char *shell_path(char const * path, char *shells[], char *sh); static void rmat(uid_t uid); static void rmopie(char const * name); static void mkdir_home_parents(int dfd, const char *dir) { struct stat st; char *dirs, *tmp; if (*dir != '/') errx(EX_DATAERR, "invalid base directory for home '%s'", dir); dir++; if (fstatat(dfd, dir, &st, 0) != -1) { if (S_ISDIR(st.st_mode)) return; errx(EX_OSFILE, "root home `/%s' is not a directory", dir); } dirs = strdup(dir); if (dirs == NULL) errx(EX_UNAVAILABLE, "out of memory"); tmp = strrchr(dirs, '/'); if (tmp == NULL) { free(dirs); return; } tmp[0] = '\0'; /* * This is a kludge especially for Joerg :) * If the home directory would be created in the root partition, then * we really create it under /usr which is likely to have more space. * But we create a symlink from cnf->home -> "/usr" -> cnf->home */ if (strchr(dirs, '/') == NULL) { asprintf(&tmp, "usr/%s", dirs); if (tmp == NULL) errx(EX_UNAVAILABLE, "out of memory"); if (mkdirat(dfd, tmp, _DEF_DIRMODE) != -1 || errno == EEXIST) { fchownat(dfd, tmp, 0, 0, 0); symlinkat(tmp, dfd, dirs); } free(tmp); } tmp = dirs; if (fstatat(dfd, dirs, &st, 0) == -1) { while ((tmp = strchr(tmp + 1, '/')) != NULL) { *tmp = '\0'; if (fstatat(dfd, dirs, &st, 0) == -1) { if (mkdirat(dfd, dirs, _DEF_DIRMODE) == -1) err(EX_OSFILE, "'%s' (root home parent) is not a directory", dirs); } *tmp = '/'; } } if (fstatat(dfd, dirs, &st, 0) == -1) { if (mkdirat(dfd, dirs, _DEF_DIRMODE) == -1) err(EX_OSFILE, "'%s' (root home parent) is not a directory", dirs); fchownat(dfd, dirs, 0, 0, 0); } free(dirs); } static void create_and_populate_homedir(struct userconf *cnf, struct passwd *pwd, const char *skeldir, mode_t homemode, bool update) { int skelfd = -1; /* Create home parents directories */ mkdir_home_parents(conf.rootfd, pwd->pw_dir); if (skeldir != NULL && *skeldir != '\0') { if (*skeldir == '/') skeldir++; skelfd = openat(conf.rootfd, skeldir, O_DIRECTORY|O_CLOEXEC); } copymkdir(conf.rootfd, pwd->pw_dir, skelfd, homemode, pwd->pw_uid, pwd->pw_gid, 0); pw_log(cnf, update ? 
M_UPDATE : M_ADD, W_USER, "%s(%ju) home %s made", pwd->pw_name, (uintmax_t)pwd->pw_uid, pwd->pw_dir); } static int pw_set_passwd(struct passwd *pwd, int fd, bool precrypted, bool update) { int b, istty; struct termios t, n; login_cap_t *lc; char line[_PASSWORD_LEN+1]; char *p; if (fd == '-') { if (!pwd->pw_passwd || *pwd->pw_passwd != '*') { pwd->pw_passwd = "*"; /* No access */ return (1); } return (0); } if ((istty = isatty(fd))) { if (tcgetattr(fd, &t) == -1) istty = 0; else { n = t; n.c_lflag &= ~(ECHO); tcsetattr(fd, TCSANOW, &n); printf("%s%spassword for user %s:", update ? "new " : "", precrypted ? "encrypted " : "", pwd->pw_name); fflush(stdout); } } b = read(fd, line, sizeof(line) - 1); if (istty) { /* Restore state */ tcsetattr(fd, TCSANOW, &t); fputc('\n', stdout); fflush(stdout); } if (b < 0) err(EX_IOERR, "-%c file descriptor", precrypted ? 'H' : 'h'); line[b] = '\0'; if ((p = strpbrk(line, "\r\n")) != NULL) *p = '\0'; if (!*line) errx(EX_DATAERR, "empty password read on file descriptor %d", fd); if (precrypted) { if (strchr(line, ':') != NULL) errx(EX_DATAERR, "bad encrypted password"); pwd->pw_passwd = strdup(line); } else { lc = login_getpwclass(pwd); if (lc == NULL || login_setcryptfmt(lc, "sha512", NULL) == NULL) warn("setting crypt(3) format"); login_close(lc); pwd->pw_passwd = pw_pwcrypt(line); } return (1); } static void perform_chgpwent(const char *name, struct passwd *pwd, char *nispasswd) { int rc; struct passwd *nispwd; /* duplicate the entry for NIS so that chgpwent() does not modify it before the NIS update */ if (nispasswd && *nispasswd == '/') nispwd = pw_dup(pwd); rc = chgpwent(name, pwd); if (rc == -1) errx(EX_IOERR, "user '%s' does not exist (NIS?)", pwd->pw_name); else if (rc != 0) err(EX_IOERR, "passwd file update"); if (nispasswd && *nispasswd == '/') { rc = chgnispwent(nispasswd, name, nispwd); if (rc == -1) warn("User '%s' not found in NIS passwd", pwd->pw_name); else if (rc != 0) warn("NIS passwd update"); /* NOTE: NIS-only update errors are not fatal */ } } /* * The M_LOCK and M_UNLOCK functions simply add or remove * a "*LOCKED*" prefix in front of the password to prevent * it from decoding correctly, and therefore prevent * access. Of course, this only prevents access via * password authentication (not SSH, Kerberos or any * other method that does not use the UNIX password) but * that is a known limitation.
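 * For example, after `pw lock fred' the master.passwd entry for a
 * hypothetical user `fred' might read (hash shortened for illustration):
 *
 *   fred:*LOCKED*$6$saltsalt$Xy...:1001:1001::0:0:Fred &:/home/fred:/bin/sh
 *
 * No output of crypt(3) can ever match a string carrying this prefix,
 * so password checks against the field are guaranteed to fail.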
*/ static int pw_userlock(char *arg1, int mode) { struct passwd *pwd = NULL; char *passtmp = NULL; char *name; bool locked = false; uid_t id = (uid_t)-1; if (geteuid() != 0) errx(EX_NOPERM, "you must be root"); if (arg1 == NULL) errx(EX_DATAERR, "username or id required"); name = arg1; if (arg1[strspn(name, "0123456789")] == '\0') id = pw_checkid(name, UID_MAX); pwd = GETPWNAM(pw_checkname(name, 0)); if (pwd == NULL && id != (uid_t)-1) { pwd = GETPWUID(id); if (pwd != NULL) name = pwd->pw_name; } if (pwd == NULL) { if (id != (uid_t)-1) errx(EX_NOUSER, "no such uid `%ju'", (uintmax_t) id); errx(EX_NOUSER, "no such user `%s'", name); } if (name == NULL) name = pwd->pw_name; if (strncmp(pwd->pw_passwd, locked_str, sizeof(locked_str) -1) == 0) locked = true; if (mode == M_LOCK && locked) errx(EX_DATAERR, "user '%s' is already locked", pwd->pw_name); if (mode == M_UNLOCK && !locked) errx(EX_DATAERR, "user '%s' is not locked", pwd->pw_name); if (mode == M_LOCK) { asprintf(&passtmp, "%s%s", locked_str, pwd->pw_passwd); if (passtmp == NULL) /* disaster */ errx(EX_UNAVAILABLE, "out of memory"); pwd->pw_passwd = passtmp; } else { pwd->pw_passwd += sizeof(locked_str)-1; } perform_chgpwent(name, pwd, NULL); free(passtmp); return (EXIT_SUCCESS); } static uid_t pw_uidpolicy(struct userconf * cnf, intmax_t id) { struct passwd *pwd; struct bitmap bm; uid_t uid = (uid_t) - 1; /* * Check the given uid, if any */ if (id >= 0) { uid = (uid_t) id; if ((pwd = GETPWUID(uid)) != NULL && conf.checkduplicate) errx(EX_DATAERR, "uid `%ju' has already been allocated", (uintmax_t)pwd->pw_uid); return (uid); } /* * We need to allocate the next available uid under one of * two policies a) Grab the first unused uid b) Grab the * highest possible unused uid */ if (cnf->min_uid >= cnf->max_uid) { /* Sanity * claus^H^H^H^Hheck */ cnf->min_uid = 1000; cnf->max_uid = 32000; } bm = bm_alloc(cnf->max_uid - cnf->min_uid + 1); /* * Now, let's fill the bitmap from the password file */ SETPWENT(); while ((pwd = GETPWENT()) != NULL) if (pwd->pw_uid >= (uid_t) cnf->min_uid && pwd->pw_uid <= (uid_t) cnf->max_uid) bm_setbit(&bm, pwd->pw_uid - cnf->min_uid); ENDPWENT(); /* * Then apply the policy, with fallback to reuse if necessary */ if (cnf->reuse_uids || (uid = (uid_t) (bm_lastset(&bm) + cnf->min_uid + 1)) > cnf->max_uid) uid = (uid_t) (bm_firstunset(&bm) + cnf->min_uid); /* * Another sanity check */ if (uid < cnf->min_uid || uid > cnf->max_uid) errx(EX_SOFTWARE, "unable to allocate a new uid - range fully used"); bm_dealloc(&bm); return (uid); } static uid_t pw_gidpolicy(struct userconf *cnf, char *grname, char *nam, gid_t prefer, bool dryrun) { struct group *grp; gid_t gid = (gid_t) - 1; /* * Check the given gid, if any */ SETGRENT(); if (grname) { if ((grp = GETGRNAM(grname)) == NULL) { gid = pw_checkid(grname, GID_MAX); grp = GETGRGID(gid); } gid = grp->gr_gid; } else if ((grp = GETGRNAM(nam)) != NULL && (grp->gr_mem == NULL || grp->gr_mem[0] == NULL)) { gid = grp->gr_gid; /* Already created? Use it anyway... */ } else { intmax_t grid = -1; /* * We need to auto-create a group with the user's name. We * can send all the appropriate output to our sister routine * but first see if we can create a group with gid==uid so we * can keep the user and group ids in sync. We purposely do * NOT check the gid range if we can force the sync. If the * user's name dups an existing group, then the group add * function will happily handle that case for us and exit.
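 * For example, when adding a hypothetical user `fred' who has just been
 * allocated uid 1001, we first try to create a group fred(1001) while
 * gid 1001 is still free, so the account ends up with uid == gid.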
*/ if (GETGRGID(prefer) == NULL) grid = prefer; if (dryrun) { gid = pw_groupnext(cnf, true); } else { if (grid == -1) grid = pw_groupnext(cnf, true); groupadd(cnf, nam, grid, NULL, -1, false, false, false); if ((grp = GETGRNAM(nam)) != NULL) gid = grp->gr_gid; } } ENDGRENT(); return (gid); } static char * pw_homepolicy(struct userconf * cnf, char *homedir, const char *user) { static char home[128]; if (homedir) return (homedir); if (cnf->home == NULL || *cnf->home == '\0') errx(EX_CONFIG, "no base home directory set"); snprintf(home, sizeof(home), "%s/%s", cnf->home, user); return (home); } static char * shell_path(char const * path, char *shells[], char *sh) { if (sh != NULL && (*sh == '/' || *sh == '\0')) return sh; /* specified full path or forced none */ else { char *p; char paths[_UC_MAXLINE]; /* * We need to search paths */ strlcpy(paths, path, sizeof(paths)); for (p = strtok(paths, ": \t\r\n"); p != NULL; p = strtok(NULL, ": \t\r\n")) { int i; static char shellpath[256]; if (sh != NULL) { snprintf(shellpath, sizeof(shellpath), "%s/%s", p, sh); if (access(shellpath, X_OK) == 0) return shellpath; } else for (i = 0; i < _UC_MAXSHELLS && shells[i] != NULL; i++) { snprintf(shellpath, sizeof(shellpath), "%s/%s", p, shells[i]); if (access(shellpath, X_OK) == 0) return shellpath; } } if (sh != NULL) errx(EX_OSFILE, "can't find shell `%s' in shell paths", sh); errx(EX_CONFIG, "no default shell available or defined"); return NULL; } } static char * pw_shellpolicy(struct userconf * cnf) { return shell_path(cnf->shelldir, cnf->shells, cnf->shell_default); } #define SALTSIZE 32 static char const chars[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ./"; char * pw_pwcrypt(char *password) { int i; char salt[SALTSIZE + 1]; char *cryptpw; static char buf[256]; /* * Calculate a salt value */ for (i = 0; i < SALTSIZE; i++) salt[i] = chars[arc4random_uniform(sizeof(chars) - 1)]; salt[SALTSIZE] = '\0'; cryptpw = crypt(password, salt); if (cryptpw == NULL) errx(EX_CONFIG, "crypt(3) failure"); return strcpy(buf, cryptpw); } static char * pw_password(struct userconf * cnf, char const * user, bool dryrun) { int i, l; char pwbuf[32]; switch (cnf->default_password) { case -1: /* Random password */ l = (arc4random() % 8 + 8); /* 8 - 15 chars */ for (i = 0; i < l; i++) pwbuf[i] = chars[arc4random_uniform(sizeof(chars)-1)]; pwbuf[i] = '\0'; /* * We give this information back to the user */ if (conf.fd == -1 && !dryrun) { if (isatty(STDOUT_FILENO)) printf("Password for '%s' is: ", user); printf("%s\n", pwbuf); fflush(stdout); } break; case -2: /* No password at all! */ return ""; case 0: /* No login - default */ default: return "*"; case 1: /* user's name */ strlcpy(pwbuf, user, sizeof(pwbuf)); break; } return pw_pwcrypt(pwbuf); } static int print_user(struct passwd * pwd, bool pretty, bool v7) { int j; char *p; struct group *grp = GETGRGID(pwd->pw_gid); char uname[60] = "User &", office[60] = "[None]", wphone[60] = "[None]", hphone[60] = "[None]"; char acexpire[32] = "[None]", pwexpire[32] = "[None]"; struct tm * tptr; if (!pretty) { p = v7 ?
pw_make_v7(pwd) : pw_make(pwd); printf("%s\n", p); free(p); return (EXIT_SUCCESS); } if ((p = strtok(pwd->pw_gecos, ",")) != NULL) { strlcpy(uname, p, sizeof(uname)); if ((p = strtok(NULL, ",")) != NULL) { strlcpy(office, p, sizeof(office)); if ((p = strtok(NULL, ",")) != NULL) { strlcpy(wphone, p, sizeof(wphone)); if ((p = strtok(NULL, "")) != NULL) { strlcpy(hphone, p, sizeof(hphone)); } } } } /* * Handle '&' in gecos field */ if ((p = strchr(uname, '&')) != NULL) { int l = strlen(pwd->pw_name); int m = strlen(p); memmove(p + l, p + 1, m); memmove(p, pwd->pw_name, l); *p = (char) toupper((unsigned char)*p); } if (pwd->pw_expire > (time_t)0 && (tptr = localtime(&pwd->pw_expire)) != NULL) strftime(acexpire, sizeof acexpire, "%c", tptr); if (pwd->pw_change > (time_t)0 && (tptr = localtime(&pwd->pw_change)) != NULL) strftime(pwexpire, sizeof pwexpire, "%c", tptr); printf("Login Name: %-15s #%-12ju Group: %-15s #%ju\n" " Full Name: %s\n" " Home: %-26.26s Class: %s\n" " Shell: %-26.26s Office: %s\n" "Work Phone: %-26.26s Home Phone: %s\n" "Acc Expire: %-26.26s Pwd Expire: %s\n", pwd->pw_name, (uintmax_t)pwd->pw_uid, grp ? grp->gr_name : "(invalid)", (uintmax_t)pwd->pw_gid, uname, pwd->pw_dir, pwd->pw_class, pwd->pw_shell, office, wphone, hphone, acexpire, pwexpire); SETGRENT(); j = 0; while ((grp=GETGRENT()) != NULL) { int i = 0; if (grp->gr_mem != NULL) { while (grp->gr_mem[i] != NULL) { if (strcmp(grp->gr_mem[i], pwd->pw_name)==0) { printf(j++ == 0 ? " Groups: %s" : ",%s", grp->gr_name); break; } ++i; } } } ENDGRENT(); printf("%s", j ? "\n" : ""); return (EXIT_SUCCESS); } char * pw_checkname(char *name, int gecos) { char showch[8]; const char *badchars, *ch, *showtype; int reject; ch = name; reject = 0; if (gecos) { /* See if the name is valid as a gecos (comment) field. */ badchars = ":!@"; showtype = "gecos field"; } else { /* See if the name is valid as a userid or group. */ badchars = " ,\t:+&#%$^()!@~*?<>=|\\/\""; showtype = "userid/group name"; /* Userids and groups can not have a leading '-'. */ if (*ch == '-') reject = 1; } if (!reject) { while (*ch) { if (strchr(badchars, *ch) != NULL || (!gecos && *ch < ' ') || *ch == 127) { reject = 1; break; } /* 8-bit characters are only allowed in GECOS fields */ if (!gecos && (*ch & 0x80)) { reject = 1; break; } ch++; } } /* * A `$' is allowed as the final character for userids and groups, * mainly for the benefit of samba. */ if (reject && !gecos) { if (*ch == '$' && *(ch + 1) == '\0') { reject = 0; ch++; } } if (reject) { snprintf(showch, sizeof(showch), (*ch >= ' ' && *ch < 127) ? 
"`%c'" : "0x%02x", *ch); errx(EX_DATAERR, "invalid character %s at position %td in %s", showch, (ch - name), showtype); } if (!gecos && (ch - name) > LOGNAMESIZE) errx(EX_USAGE, "name too long `%s' (max is %d)", name, LOGNAMESIZE); return (name); } static void rmat(uid_t uid) { DIR *d = opendir("/var/at/jobs"); if (d != NULL) { struct dirent *e; while ((e = readdir(d)) != NULL) { struct stat st; if (strncmp(e->d_name, ".lock", 5) != 0 && stat(e->d_name, &st) == 0 && !S_ISDIR(st.st_mode) && st.st_uid == uid) { char tmp[MAXPATHLEN]; snprintf(tmp, sizeof(tmp), "/usr/bin/atrm %s", e->d_name); system(tmp); } } closedir(d); } } static void rmopie(char const * name) { char tmp[1014]; FILE *fp; int fd; size_t len; off_t atofs = 0; if ((fd = openat(conf.rootfd, "etc/opiekeys", O_RDWR)) == -1) return; fp = fdopen(fd, "r+"); len = strlen(name); while (fgets(tmp, sizeof(tmp), fp) != NULL) { if (strncmp(name, tmp, len) == 0 && tmp[len]==' ') { /* Comment username out */ if (fseek(fp, atofs, SEEK_SET) == 0) fwrite("#", 1, 1, fp); break; } atofs = ftell(fp); } /* * If we got an error of any sort, don't update! */ fclose(fp); } int pw_user_next(int argc, char **argv, char *name __unused) { struct userconf *cnf = NULL; const char *cfg = NULL; int ch; bool quiet = false; uid_t next; while ((ch = getopt(argc, argv, "C:q")) != -1) { switch (ch) { case 'C': cfg = optarg; break; case 'q': quiet = true; break; } } if (quiet) freopen(_PATH_DEVNULL, "w", stderr); cnf = get_userconfig(cfg); next = pw_uidpolicy(cnf, -1); printf("%ju:", (uintmax_t)next); pw_groupnext(cnf, quiet); return (EXIT_SUCCESS); } int pw_user_show(int argc, char **argv, char *arg1) { struct passwd *pwd = NULL; char *name = NULL; intmax_t id = -1; int ch; bool all = false; bool pretty = false; bool force = false; bool v7 = false; bool quiet = false; if (arg1 != NULL) { if (arg1[strspn(arg1, "0123456789")] == '\0') id = pw_checkid(arg1, UID_MAX); else name = arg1; } while ((ch = getopt(argc, argv, "C:qn:u:FPa7")) != -1) { switch (ch) { case 'C': /* ignore compatibility */ break; case 'q': quiet = true; break; case 'n': name = optarg; break; case 'u': id = pw_checkid(optarg, UID_MAX); break; case 'F': force = true; break; case 'P': pretty = true; break; case 'a': all = true; break; case '7': v7 = true; break; } } if (quiet) freopen(_PATH_DEVNULL, "w", stderr); if (all) { SETPWENT(); while ((pwd = GETPWENT()) != NULL) print_user(pwd, pretty, v7); ENDPWENT(); return (EXIT_SUCCESS); } if (id < 0 && name == NULL) errx(EX_DATAERR, "username or id required"); pwd = (name != NULL) ? 
GETPWNAM(pw_checkname(name, 0)) : GETPWUID(id); if (pwd == NULL) { if (force) { pwd = &fakeuser; } else { if (name == NULL) errx(EX_NOUSER, "no such uid `%ju'", (uintmax_t) id); errx(EX_NOUSER, "no such user `%s'", name); } } return (print_user(pwd, pretty, v7)); } int pw_user_del(int argc, char **argv, char *arg1) { struct userconf *cnf = NULL; struct passwd *pwd = NULL; struct group *gr, *grp; char *name = NULL; char grname[MAXLOGNAME]; char *nispasswd = NULL; char file[MAXPATHLEN]; char home[MAXPATHLEN]; const char *cfg = NULL; struct stat st; intmax_t id = -1; int ch, rc; bool nis = false; bool deletehome = false; bool quiet = false; if (arg1 != NULL) { if (arg1[strspn(arg1, "0123456789")] == '\0') id = pw_checkid(arg1, UID_MAX); else name = arg1; } while ((ch = getopt(argc, argv, "C:qn:u:rYy:")) != -1) { switch (ch) { case 'C': cfg = optarg; break; case 'q': quiet = true; break; case 'n': name = optarg; break; case 'u': id = pw_checkid(optarg, UID_MAX); break; case 'r': deletehome = true; break; case 'y': nispasswd = optarg; break; case 'Y': nis = true; break; } } if (quiet) freopen(_PATH_DEVNULL, "w", stderr); if (id < 0 && name == NULL) errx(EX_DATAERR, "username or id required"); cnf = get_userconfig(cfg); if (nispasswd == NULL) nispasswd = cnf->nispasswd; pwd = (name != NULL) ? GETPWNAM(pw_checkname(name, 0)) : GETPWUID(id); if (pwd == NULL) { if (name == NULL) errx(EX_NOUSER, "no such uid `%ju'", (uintmax_t) id); errx(EX_NOUSER, "no such user `%s'", name); } if (PWF._altdir == PWF_REGULAR && ((pwd->pw_fields & _PWF_SOURCE) != _PWF_FILES)) { if ((pwd->pw_fields & _PWF_SOURCE) == _PWF_NIS) { if (!nis && nispasswd && *nispasswd != '/') errx(EX_NOUSER, "Cannot remove NIS user `%s'", name); } else { errx(EX_NOUSER, "Cannot remove non local user `%s'", name); } } id = pwd->pw_uid; if (name == NULL) name = pwd->pw_name; if (strcmp(pwd->pw_name, "root") == 0) errx(EX_DATAERR, "cannot remove user 'root'"); /* Remove opie record from /etc/opiekeys */ if (PWALTDIR() != PWF_ALT) rmopie(pwd->pw_name); if (!PWALTDIR()) { /* Remove crontabs */ snprintf(file, sizeof(file), "/var/cron/tabs/%s", pwd->pw_name); if (access(file, F_OK) == 0) { snprintf(file, sizeof(file), "crontab -u %s -r", pwd->pw_name); system(file); } } /* * Save these for later, since contents of pwd may be * invalidated by deletion */ snprintf(file, sizeof(file), "%s/%s", _PATH_MAILDIR, pwd->pw_name); strlcpy(home, pwd->pw_dir, sizeof(home)); gr = GETGRGID(pwd->pw_gid); if (gr != NULL) strlcpy(grname, gr->gr_name, LOGNAMESIZE); else grname[0] = '\0'; rc = delpwent(pwd); if (rc == -1) err(EX_IOERR, "user '%s' does not exist", pwd->pw_name); else if (rc != 0) err(EX_IOERR, "passwd update"); if (nis && nispasswd && *nispasswd=='/') { rc = delnispwent(nispasswd, name); if (rc == -1) warnx("WARNING: user '%s' does not exist in NIS passwd", pwd->pw_name); else if (rc != 0) warn("WARNING: NIS passwd update"); } grp = GETGRNAM(name); if (grp != NULL && (grp->gr_mem == NULL || *grp->gr_mem == NULL) && strcmp(name, grname) == 0) delgrent(GETGRNAM(name)); SETGRENT(); while ((grp = GETGRENT()) != NULL) { int i, j; char group[MAXLOGNAME]; if (grp->gr_mem == NULL) continue; for (i = 0; grp->gr_mem[i] != NULL; i++) { if (strcmp(grp->gr_mem[i], name) != 0) continue; for (j = i; grp->gr_mem[j] != NULL; j++) grp->gr_mem[j] = grp->gr_mem[j+1]; strlcpy(group, grp->gr_name, MAXLOGNAME); chggrent(group, grp); } } ENDGRENT(); pw_log(cnf, M_DELETE, W_USER, "%s(%ju) account removed", name, (uintmax_t)id); /* Remove mail file */ if (PWALTDIR() != 
PWF_ALT) unlinkat(conf.rootfd, file + 1, 0); /* Remove at jobs */ if (!PWALTDIR() && getpwuid(id) == NULL) rmat(id); /* Remove home directory and contents */ if (PWALTDIR() != PWF_ALT && deletehome && *home == '/' && GETPWUID(id) == NULL && fstatat(conf.rootfd, home + 1, &st, 0) != -1) { rm_r(conf.rootfd, home, id); pw_log(cnf, M_DELETE, W_USER, "%s(%ju) home '%s' %s" "removed", name, (uintmax_t)id, home, fstatat(conf.rootfd, home + 1, &st, 0) == -1 ? "" : "not " "completely "); } return (EXIT_SUCCESS); } int pw_user_lock(int argc, char **argv, char *arg1) { int ch; while ((ch = getopt(argc, argv, "Cq")) != -1) { switch (ch) { case 'C': case 'q': /* compatibility */ break; } } return (pw_userlock(arg1, M_LOCK)); } int pw_user_unlock(int argc, char **argv, char *arg1) { int ch; while ((ch = getopt(argc, argv, "Cq")) != -1) { switch (ch) { case 'C': case 'q': /* compatibility */ break; } } return (pw_userlock(arg1, M_UNLOCK)); } static struct group * group_from_name_or_id(char *name) { const char *errstr = NULL; struct group *grp; uintmax_t id; if ((grp = GETGRNAM(name)) == NULL) { id = strtounum(name, 0, GID_MAX, &errstr); if (errstr) errx(EX_NOUSER, "group `%s' does not exist", name); grp = GETGRGID(id); if (grp == NULL) errx(EX_NOUSER, "group `%s' does not exist", name); } return (grp); } static void split_groups(StringList **groups, char *groupsstr) { struct group *grp; char *p; char tok[] = ", \t"; for (p = strtok(groupsstr, tok); p != NULL; p = strtok(NULL, tok)) { grp = group_from_name_or_id(p); if (*groups == NULL) *groups = sl_init(); sl_add(*groups, newstr(grp->gr_name)); } } static void validate_grname(struct userconf *cnf, char *group) { struct group *grp; if (group == NULL || *group == '\0') { cnf->default_group = ""; return; } grp = group_from_name_or_id(group); cnf->default_group = newstr(grp->gr_name); } static mode_t validate_mode(char *mode) { mode_t m; void *set; if ((set = setmode(mode)) == NULL) errx(EX_DATAERR, "invalid directory creation mode '%s'", mode); m = getmode(set, _DEF_DIRMODE); free(set); return (m); } static void mix_config(struct userconf *cmdcnf, struct userconf *cfg) { if (cmdcnf->default_password == 0) cmdcnf->default_password = cfg->default_password; if (cmdcnf->reuse_uids == 0) cmdcnf->reuse_uids = cfg->reuse_uids; if (cmdcnf->reuse_gids == 0) cmdcnf->reuse_gids = cfg->reuse_gids; if (cmdcnf->nispasswd == NULL) cmdcnf->nispasswd = cfg->nispasswd; if (cmdcnf->dotdir == NULL) cmdcnf->dotdir = cfg->dotdir; if (cmdcnf->newmail == NULL) cmdcnf->newmail = cfg->newmail; if (cmdcnf->logfile == NULL) cmdcnf->logfile = cfg->logfile; if (cmdcnf->home == NULL) cmdcnf->home = cfg->home; if (cmdcnf->homemode == 0) cmdcnf->homemode = cfg->homemode; if (cmdcnf->shelldir == NULL) cmdcnf->shelldir = cfg->shelldir; if (cmdcnf->shells == NULL) cmdcnf->shells = cfg->shells; if (cmdcnf->shell_default == NULL) cmdcnf->shell_default = cfg->shell_default; if (cmdcnf->default_group == NULL) cmdcnf->default_group = cfg->default_group; if (cmdcnf->groups == NULL) cmdcnf->groups = cfg->groups; if (cmdcnf->default_class == NULL) cmdcnf->default_class = cfg->default_class; if (cmdcnf->min_uid == 0) cmdcnf->min_uid = cfg->min_uid; if (cmdcnf->max_uid == 0) cmdcnf->max_uid = cfg->max_uid; if (cmdcnf->min_gid == 0) cmdcnf->min_gid = cfg->min_gid; if (cmdcnf->max_gid == 0) cmdcnf->max_gid = cfg->max_gid; if (cmdcnf->expire_days == 0) cmdcnf->expire_days = cfg->expire_days; if (cmdcnf->password_days == 0) cmdcnf->password_days = cfg->password_days; } int pw_user_add(int argc, char 
**argv, char *arg1) { struct userconf *cnf, *cmdcnf; struct passwd *pwd; struct group *grp; struct stat st; char args[] = "C:qn:u:c:d:e:p:g:G:mM:k:s:oL:i:w:h:H:Db:NPy:Y"; char line[_PASSWORD_LEN+1], path[MAXPATHLEN]; char *gecos, *homedir, *skel, *walk, *userid, *groupid, *grname; char *default_passwd, *name, *p; const char *cfg = NULL; login_cap_t *lc; FILE *pfp, *fp; intmax_t id = -1; time_t now; int rc, ch, fd = -1; size_t i; bool dryrun, nis, pretty, quiet, createhome, precrypted, genconf; dryrun = nis = pretty = quiet = createhome = precrypted = false; genconf = false; gecos = homedir = skel = userid = groupid = default_passwd = NULL; grname = name = NULL; if ((cmdcnf = calloc(1, sizeof(struct userconf))) == NULL) err(EXIT_FAILURE, "calloc()"); if (arg1 != NULL) { if (arg1[strspn(arg1, "0123456789")] == '\0') id = pw_checkid(arg1, UID_MAX); else name = arg1; } while ((ch = getopt(argc, argv, args)) != -1) { switch (ch) { case 'C': cfg = optarg; break; case 'q': quiet = true; break; case 'n': name = optarg; break; case 'u': userid = optarg; break; case 'c': gecos = pw_checkname(optarg, 1); break; case 'd': homedir = optarg; break; case 'e': now = time(NULL); cmdcnf->expire_days = parse_date(now, optarg); break; case 'p': now = time(NULL); cmdcnf->password_days = parse_date(now, optarg); break; case 'g': validate_grname(cmdcnf, optarg); grname = optarg; break; case 'G': split_groups(&cmdcnf->groups, optarg); break; case 'm': createhome = true; break; case 'M': cmdcnf->homemode = validate_mode(optarg); break; case 'k': walk = skel = optarg; if (*walk == '/') walk++; if (fstatat(conf.rootfd, walk, &st, 0) == -1) errx(EX_OSFILE, "skeleton `%s' does not " "exist", skel); if (!S_ISDIR(st.st_mode)) errx(EX_OSFILE, "skeleton `%s' is not a " "directory", skel); cmdcnf->dotdir = skel; break; case 's': cmdcnf->shell_default = optarg; break; case 'o': conf.checkduplicate = false; break; case 'L': cmdcnf->default_class = pw_checkname(optarg, 0); break; case 'i': groupid = optarg; break; case 'w': default_passwd = optarg; break; case 'H': if (fd != -1) errx(EX_USAGE, "'-h' and '-H' are mutually " "exclusive options"); fd = pw_checkfd(optarg); precrypted = true; if (fd == '-') errx(EX_USAGE, "-H expects a file descriptor"); break; case 'h': if (fd != -1) errx(EX_USAGE, "'-h' and '-H' are mutually " "exclusive options"); fd = pw_checkfd(optarg); break; case 'D': genconf = true; break; case 'b': cmdcnf->home = optarg; break; case 'N': dryrun = true; break; case 'P': pretty = true; break; case 'y': cmdcnf->nispasswd = optarg; break; case 'Y': nis = true; break; } } if (geteuid() != 0 && !
dryrun) errx(EX_NOPERM, "you must be root"); if (quiet) freopen(_PATH_DEVNULL, "w", stderr); cnf = get_userconfig(cfg); mix_config(cmdcnf, cnf); if (default_passwd) cmdcnf->default_password = passwd_val(default_passwd, cnf->default_password); if (genconf) { if (name != NULL) errx(EX_DATAERR, "can't combine `-D' with `-n name'"); if (userid != NULL) { if ((p = strtok(userid, ", \t")) != NULL) cmdcnf->min_uid = pw_checkid(p, UID_MAX); if (cmdcnf->min_uid == 0) cmdcnf->min_uid = 1000; if ((p = strtok(NULL, " ,\t")) != NULL) cmdcnf->max_uid = pw_checkid(p, UID_MAX); if (cmdcnf->max_uid == 0) cmdcnf->max_uid = 32000; } if (groupid != NULL) { if ((p = strtok(groupid, ", \t")) != NULL) cmdcnf->min_gid = pw_checkid(p, GID_MAX); if (cmdcnf->min_gid == 0) cmdcnf->min_gid = 1000; if ((p = strtok(NULL, " ,\t")) != NULL) cmdcnf->max_gid = pw_checkid(p, GID_MAX); if (cmdcnf->max_gid == 0) cmdcnf->max_gid = 32000; } if (write_userconfig(cmdcnf, cfg)) return (EXIT_SUCCESS); err(EX_IOERR, "config update"); } if (userid) id = pw_checkid(userid, UID_MAX); if (id < 0 && name == NULL) errx(EX_DATAERR, "user name or id required"); if (name == NULL) errx(EX_DATAERR, "login name required"); if (GETPWNAM(name) != NULL) errx(EX_DATAERR, "login name `%s' already exists", name); pwd = &fakeuser; pwd->pw_name = name; pwd->pw_class = cmdcnf->default_class ? cmdcnf->default_class : ""; pwd->pw_uid = pw_uidpolicy(cmdcnf, id); pwd->pw_gid = pw_gidpolicy(cnf, grname, pwd->pw_name, (gid_t) pwd->pw_uid, dryrun); pwd->pw_change = cmdcnf->password_days; pwd->pw_expire = cmdcnf->expire_days; pwd->pw_dir = pw_homepolicy(cmdcnf, homedir, pwd->pw_name); pwd->pw_shell = pw_shellpolicy(cmdcnf); lc = login_getpwclass(pwd); if (lc == NULL || login_setcryptfmt(lc, "sha512", NULL) == NULL) warn("setting crypt(3) format"); login_close(lc); pwd->pw_passwd = pw_password(cmdcnf, pwd->pw_name, dryrun); if (pwd->pw_uid == 0 && strcmp(pwd->pw_name, "root") != 0) warnx("WARNING: new account `%s' has a uid of 0 " "(superuser access!)", pwd->pw_name); if (gecos) pwd->pw_gecos = gecos; if (fd != -1) pw_set_passwd(pwd, fd, precrypted, false); if (dryrun) return (print_user(pwd, pretty, false)); if ((rc = addpwent(pwd)) != 0) { if (rc == -1) errx(EX_IOERR, "user '%s' already exists", pwd->pw_name); else if (rc != 0) err(EX_IOERR, "passwd file update"); } if (nis && cmdcnf->nispasswd && *cmdcnf->nispasswd == '/') { printf("%s\n", cmdcnf->nispasswd); rc = addnispwent(cmdcnf->nispasswd, pwd); if (rc == -1) warnx("User '%s' already exists in NIS passwd", pwd->pw_name); else if (rc != 0) warn("NIS passwd update"); /* NOTE: we treat NIS-only update errors as non-fatal */ } if (cmdcnf->groups != NULL) { for (i = 0; i < cmdcnf->groups->sl_cur; i++) { grp = GETGRNAM(cmdcnf->groups->sl_str[i]); grp = gr_add(grp, pwd->pw_name); /* * grp can only be NULL in 2 cases: * - the new member is already a member * - a problem with memory occurs * in both cases we want to skip now. */ if (grp == NULL) continue; chggrent(grp->gr_name, grp); free(grp); } } pwd = GETPWNAM(name); if (pwd == NULL) errx(EX_NOUSER, "user '%s' disappeared during update", name); grp = GETGRGID(pwd->pw_gid); pw_log(cnf, M_ADD, W_USER, "%s(%ju):%s(%ju):%s:%s:%s", pwd->pw_name, (uintmax_t)pwd->pw_uid, grp ? grp->gr_name : "unknown", (uintmax_t)(grp ? grp->gr_gid : (uid_t)-1), pwd->pw_gecos, pwd->pw_dir, pwd->pw_shell); /* * let's touch and chown the user's mail file. 
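 * (For a hypothetical new user `fred' this amounts to creating an empty
 * /var/mail/fred, owned by fred's uid and gid, with mode 0600.)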
This is not * strictly necessary under BSD with a 0755 maildir but it also * doesn't hurt anything to create the empty mailfile */ if (PWALTDIR() != PWF_ALT) { snprintf(path, sizeof(path), "%s/%s", _PATH_MAILDIR, pwd->pw_name); /* Preserve contents & mtime */ close(openat(conf.rootfd, path +1, O_RDWR | O_CREAT, 0600)); fchownat(conf.rootfd, path + 1, pwd->pw_uid, pwd->pw_gid, AT_SYMLINK_NOFOLLOW); } /* * Let's create and populate the user's home directory. Note * that this also `works' for editing users if -m is used, but * existing files will *not* be overwritten. */ if (PWALTDIR() != PWF_ALT && createhome && pwd->pw_dir && *pwd->pw_dir == '/' && pwd->pw_dir[1]) create_and_populate_homedir(cmdcnf, pwd, cmdcnf->dotdir, cmdcnf->homemode, false); if (!PWALTDIR() && cmdcnf->newmail && *cmdcnf->newmail && (fp = fopen(cnf->newmail, "r")) != NULL) { if ((pfp = popen(_PATH_SENDMAIL " -t", "w")) == NULL) warn("sendmail"); else { fprintf(pfp, "From: root\n" "To: %s\n" "Subject: Welcome!\n\n", pwd->pw_name); while (fgets(line, sizeof(line), fp) != NULL) { /* Do substitutions? */ fputs(line, pfp); } pclose(pfp); pw_log(cnf, M_ADD, W_USER, "%s(%ju) new user mail sent", pwd->pw_name, (uintmax_t)pwd->pw_uid); } fclose(fp); } if (nis && nis_update() == 0) pw_log(cnf, M_ADD, W_USER, "NIS maps updated"); return (EXIT_SUCCESS); } int pw_user_mod(int argc, char **argv, char *arg1) { struct userconf *cnf; struct passwd *pwd; struct group *grp; StringList *groups = NULL; char args[] = "C:qn:u:c:d:e:p:g:G:mM:l:k:s:w:L:h:H:NPYy:"; const char *cfg = NULL; char *gecos, *homedir, *grname, *name, *newname, *walk, *skel, *shell; char *passwd, *class, *nispasswd; login_cap_t *lc; struct stat st; intmax_t id = -1; int ch, fd = -1; size_t i, j; - bool quiet, createhome, pretty, dryrun, nis, edited, docreatehome; + bool quiet, createhome, pretty, dryrun, nis, edited; bool precrypted; mode_t homemode = 0; time_t expire_days, password_days, now; expire_days = password_days = -1; gecos = homedir = grname = name = newname = skel = shell = NULL; passwd = NULL; class = nispasswd = NULL; quiet = createhome = pretty = dryrun = nis = precrypted = false; - edited = docreatehome = false; + edited = false; if (arg1 != NULL) { if (arg1[strspn(arg1, "0123456789")] == '\0') id = pw_checkid(arg1, UID_MAX); else name = arg1; } while ((ch = getopt(argc, argv, args)) != -1) { switch (ch) { case 'C': cfg = optarg; break; case 'q': quiet = true; break; case 'n': name = optarg; break; case 'u': id = pw_checkid(optarg, UID_MAX); break; case 'c': gecos = pw_checkname(optarg, 1); break; case 'd': homedir = optarg; break; case 'e': now = time(NULL); expire_days = parse_date(now, optarg); break; case 'p': now = time(NULL); password_days = parse_date(now, optarg); break; case 'g': group_from_name_or_id(optarg); grname = optarg; break; case 'G': split_groups(&groups, optarg); break; case 'm': createhome = true; break; case 'M': homemode = validate_mode(optarg); break; case 'l': newname = optarg; break; case 'k': walk = skel = optarg; if (*walk == '/') walk++; if (fstatat(conf.rootfd, walk, &st, 0) == -1) errx(EX_OSFILE, "skeleton `%s' does not " "exist", skel); if (!S_ISDIR(st.st_mode)) errx(EX_OSFILE, "skeleton `%s' is not a " "directory", skel); break; case 's': shell = optarg; break; case 'w': passwd = optarg; break; case 'L': class = pw_checkname(optarg, 0); break; case 'H': if (fd != -1) errx(EX_USAGE, "'-h' and '-H' are mutually " "exclusive options"); fd = pw_checkfd(optarg); precrypted = true; if (fd == '-') errx(EX_USAGE, "-H expects a file
descriptor"); break; case 'h': if (fd != -1) errx(EX_USAGE, "'-h' and '-H' are mutually " "exclusive options"); fd = pw_checkfd(optarg); break; case 'N': dryrun = true; break; case 'P': pretty = true; break; case 'y': nispasswd = optarg; break; case 'Y': nis = true; break; } } if (geteuid() != 0 && ! dryrun) errx(EX_NOPERM, "you must be root"); if (quiet) freopen(_PATH_DEVNULL, "w", stderr); cnf = get_userconfig(cfg); if (id < 0 && name == NULL) errx(EX_DATAERR, "username or id required"); pwd = (name != NULL) ? GETPWNAM(pw_checkname(name, 0)) : GETPWUID(id); if (pwd == NULL) { if (name == NULL) errx(EX_NOUSER, "no such uid `%ju'", (uintmax_t) id); errx(EX_NOUSER, "no such user `%s'", name); } if (name == NULL) name = pwd->pw_name; if (nis && nispasswd == NULL) nispasswd = cnf->nispasswd; if (PWF._altdir == PWF_REGULAR && ((pwd->pw_fields & _PWF_SOURCE) != _PWF_FILES)) { if ((pwd->pw_fields & _PWF_SOURCE) == _PWF_NIS) { if (!nis && nispasswd && *nispasswd != '/') errx(EX_NOUSER, "Cannot modify NIS user `%s'", name); } else { errx(EX_NOUSER, "Cannot modify non local user `%s'", name); } } if (newname) { if (strcmp(pwd->pw_name, "root") == 0) errx(EX_DATAERR, "can't rename `root' account"); if (strcmp(pwd->pw_name, newname) != 0) { pwd->pw_name = pw_checkname(newname, 0); edited = true; } } if (id >= 0 && pwd->pw_uid != id) { pwd->pw_uid = id; edited = true; if (pwd->pw_uid != 0 && strcmp(pwd->pw_name, "root") == 0) errx(EX_DATAERR, "can't change uid of `root' account"); if (pwd->pw_uid == 0 && strcmp(pwd->pw_name, "root") != 0) warnx("WARNING: account `%s' will have a uid of 0 " "(superuser access!)", pwd->pw_name); } if (grname && pwd->pw_uid != 0) { grp = GETGRNAM(grname); if (grp == NULL) grp = GETGRGID(pw_checkid(grname, GID_MAX)); if (grp->gr_gid != pwd->pw_gid) { pwd->pw_gid = grp->gr_gid; edited = true; } } if (password_days >= 0 && pwd->pw_change != password_days) { pwd->pw_change = password_days; edited = true; } if (expire_days >= 0 && pwd->pw_expire != expire_days) { pwd->pw_expire = expire_days; edited = true; } if (shell) { shell = shell_path(cnf->shelldir, cnf->shells, shell); if (shell == NULL) shell = ""; if (strcmp(shell, pwd->pw_shell) != 0) { pwd->pw_shell = shell; edited = true; } } if (class && strcmp(pwd->pw_class, class) != 0) { pwd->pw_class = class; edited = true; } if (homedir && strcmp(pwd->pw_dir, homedir) != 0) { pwd->pw_dir = homedir; edited = true; if (fstatat(conf.rootfd, pwd->pw_dir, &st, 0) == -1) { if (!createhome) warnx("WARNING: home `%s' does not exist", pwd->pw_dir); - else - docreatehome = true; } else if (!S_ISDIR(st.st_mode)) { warnx("WARNING: home `%s' is not a directory", pwd->pw_dir); } } if (passwd && conf.fd == -1) { lc = login_getpwclass(pwd); if (lc == NULL || login_setcryptfmt(lc, "sha512", NULL) == NULL) warn("setting crypt(3) format"); login_close(lc); cnf->default_password = passwd_val(passwd, cnf->default_password); pwd->pw_passwd = pw_password(cnf, pwd->pw_name, dryrun); edited = true; } if (gecos && strcmp(pwd->pw_gecos, gecos) != 0) { pwd->pw_gecos = gecos; edited = true; } if (fd != -1) edited = pw_set_passwd(pwd, fd, precrypted, true); if (dryrun) return (print_user(pwd, pretty, false)); if (edited) /* Only updated this if required */ perform_chgpwent(name, pwd, nis ? 
nispasswd : NULL); /* Now perform the needed changes concerning groups */ if (groups != NULL) { /* Delete the user from groups using the old name */ SETGRENT(); while ((grp = GETGRENT()) != NULL) { if (grp->gr_mem == NULL) continue; for (i = 0; grp->gr_mem[i] != NULL; i++) { if (strcmp(grp->gr_mem[i] , name) != 0) continue; for (j = i; grp->gr_mem[j] != NULL ; j++) grp->gr_mem[j] = grp->gr_mem[j+1]; chggrent(grp->gr_name, grp); break; } } ENDGRENT(); /* Add the user to the needed groups */ for (i = 0; i < groups->sl_cur; i++) { grp = GETGRNAM(groups->sl_str[i]); grp = gr_add(grp, pwd->pw_name); if (grp == NULL) continue; chggrent(grp->gr_name, grp); free(grp); } } /* In case of rename we need to walk over the different groups */ if (newname) { SETGRENT(); while ((grp = GETGRENT()) != NULL) { if (grp->gr_mem == NULL) continue; for (i = 0; grp->gr_mem[i] != NULL; i++) { if (strcmp(grp->gr_mem[i], name) != 0) continue; grp->gr_mem[i] = newname; chggrent(grp->gr_name, grp); break; } } } /* go get a current version of pwd */ if (newname) name = newname; pwd = GETPWNAM(name); if (pwd == NULL) errx(EX_NOUSER, "user '%s' disappeared during update", name); grp = GETGRGID(pwd->pw_gid); pw_log(cnf, M_UPDATE, W_USER, "%s(%ju):%s(%ju):%s:%s:%s", pwd->pw_name, (uintmax_t)pwd->pw_uid, grp ? grp->gr_name : "unknown", (uintmax_t)(grp ? grp->gr_gid : (uid_t)-1), pwd->pw_gecos, pwd->pw_dir, pwd->pw_shell); /* * Let's create and populate the user's home directory. Note * that this also `works' for editing users if -m is used, but * existing files will *not* be overwritten. */ - if (PWALTDIR() != PWF_ALT && docreatehome && pwd->pw_dir && + if (PWALTDIR() != PWF_ALT && createhome && pwd->pw_dir && *pwd->pw_dir == '/' && pwd->pw_dir[1]) { if (!skel) skel = cnf->dotdir; if (homemode == 0) homemode = cnf->homemode; create_and_populate_homedir(cnf, pwd, skel, homemode, true); } if (nis && nis_update() == 0) pw_log(cnf, M_UPDATE, W_USER, "NIS maps updated"); return (EXIT_SUCCESS); } Index: projects/clang400-import/usr.sbin/pw/tests/pw_usermod.sh =================================================================== --- projects/clang400-import/usr.sbin/pw/tests/pw_usermod.sh (revision 312719) +++ projects/clang400-import/usr.sbin/pw/tests/pw_usermod.sh (revision 312720) @@ -1,278 +1,299 @@ # $FreeBSD$ # Import helper functions .
$(atf_get_srcdir)/helper_functions.shin # Test modifying a user atf_test_case user_mod user_mod_body() { populate_etc_skel atf_check -s exit:67 -e match:"no such user" ${PW} usermod test atf_check -s exit:0 ${PW} useradd test atf_check -s exit:0 ${PW} usermod test atf_check -s exit:0 -o match:"^test:.*" \ grep "^test:.*" $HOME/master.passwd } # Test modifying a user with option -N atf_test_case user_mod_noupdate user_mod_noupdate_body() { populate_etc_skel atf_check -s exit:67 -e match:"no such user" ${PW} usermod test -N atf_check -s exit:0 ${PW} useradd test atf_check -s exit:0 -o match:"^test:.*" ${PW} usermod test -N atf_check -s exit:0 -o match:"^test:.*" \ grep "^test:.*" $HOME/master.passwd } # Test modifying a user with comments atf_test_case user_mod_comments user_mod_comments_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd test -c "Test User,home,123,456" atf_check -s exit:0 ${PW} usermod test -c "Test User,work,123,456" atf_check -s exit:0 -o match:"^test:.*:Test User,work,123,456:" \ grep "^test:.*:Test User,work,123,456:" $HOME/master.passwd } # Test modifying a user with comments with option -N atf_test_case user_mod_comments_noupdate user_mod_comments_noupdate_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd test -c "Test User,home,123,456" atf_check -s exit:0 -o match:"^test:.*:Test User,work,123,456:" \ ${PW} usermod test -c "Test User,work,123,456" -N atf_check -s exit:0 -o match:"^test:.*:Test User,home,123,456:" \ grep "^test:.*:Test User,home,123,456:" $HOME/master.passwd } # Test modifying a user with invalid comments atf_test_case user_mod_comments_invalid user_mod_comments_invalid_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd test atf_check -s exit:65 -e match:"invalid character" \ ${PW} usermod test -c "Test User,work,123:456,456" atf_check -s exit:1 -o empty \ grep "^test:.*:Test User,work,123:456,456:" $HOME/master.passwd atf_check -s exit:0 -o match:"^test:\*" \ grep "^test:\*" $HOME/master.passwd } # Test modifying a user with invalid comments with option -N atf_test_case user_mod_comments_invalid_noupdate user_mod_comments_invalid_noupdate_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd test atf_check -s exit:65 -e match:"invalid character" \ ${PW} usermod test -c "Test User,work,123:456,456" -N atf_check -s exit:1 -o empty \ grep "^test:.*:Test User,work,123:456,456:" $HOME/master.passwd atf_check -s exit:0 -o match:"^test:\*" \ grep "^test:\*" $HOME/master.passwd } # Test modifying a user name with -l atf_test_case user_mod_name user_mod_name_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -l "bar" atf_check -s exit:0 -o match:"^bar:.*" \ grep "^bar:.*" $HOME/master.passwd } # Test modifying a user name with -l with option -N atf_test_case user_mod_name_noupdate user_mod_name_noupdate_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 -o match:"^bar:.*" ${PW} usermod foo -l "bar" -N atf_check -s exit:0 -o match:"^foo:.*" \ grep "^foo:.*" $HOME/master.passwd } atf_test_case user_mod_rename_multigroups user_mod_rename_multigroups_body() { populate_etc_skel atf_check -s exit:0 ${PW} groupadd test1 atf_check -s exit:0 ${PW} groupadd test2 atf_check -s exit:0 ${PW} useradd foo -G test1,test2 atf_check -o match:"foo" -s exit:0 ${PW} groupshow test1 atf_check -o match:"foo" -s exit:0 ${PW} groupshow test2 atf_check -s exit:0 ${PW} usermod foo -l bar atf_check -o match:"bar" -s exit:0 ${PW} groupshow test1 atf_check -o 
match:"bar" -s exit:0 ${PW} groupshow test2 } atf_test_case user_mod_nogroups user_mod_nogroups_body() { populate_etc_skel atf_check -s exit:0 ${PW} groupadd test1 atf_check -s exit:0 ${PW} groupadd test2 atf_check -s exit:0 ${PW} groupadd test3 atf_check -s exit:0 ${PW} groupadd test4 atf_check -s exit:0 ${PW} useradd foo -G test1,test2 atf_check -o match:"foo" -s exit:0 ${PW} groupshow test1 atf_check -o match:"foo" -s exit:0 ${PW} groupshow test2 atf_check -s exit:0 ${PW} usermod foo -G test3,test4 atf_check -s exit:0 -o inline:"test3\ntest4\n" \ awk -F\: '$4 == "foo" { print $1 }' ${HOME}/group } atf_test_case user_mod_rename user_mod_rename_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -l bar atf_check -s exit:0 -o match:"^bar:.*" \ grep "^bar:.*" ${HOME}/master.passwd } atf_test_case user_mod_rename_too_long user_mod_rename_too_long_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:64 -e match:"too long" ${PW} usermod foo \ -l name_very_very_very_very_very_long } atf_test_case user_mod_h user_mod_h_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -h 0 <<- EOF $(echo a) EOF passhash=`awk -F ':' '/^foo:/ {print $2}' $HOME/master.passwd` atf_check -s exit:0 -o inline:$passhash \ $(atf_get_srcdir)/crypt $passhash "a" atf_check -s exit:0 ${PW} usermod foo -h - <<- EOF $(echo b) EOF atf_check -s exit:0 -o match:"^foo:\*:.*" \ grep "^foo" ${HOME}/master.passwd atf_check -e inline:"pw: Bad file descriptor 'a': invalid\n" \ -s exit:64 ${PW} usermod foo -h a <<- EOF $(echo a) EOF } atf_test_case user_mod_H user_mod_H_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -H 0 <<- EOF $(echo a) EOF atf_check -s exit:0 -o match:"^foo:a:.*" \ grep "^foo" ${HOME}/master.passwd atf_check -s exit:64 -e inline:"pw: -H expects a file descriptor\n" \ ${PW} usermod foo -H - } atf_test_case user_mod_renamehome user_mod_renamehome_body() { populate_root_etc_skel mkdir -p ${HOME}/home atf_check -s exit:0 ${RPW} useradd foo -m test -d ${HOME}/home/foo || atf_fail "Directory not created" atf_check -s exit:0 ${RPW} usermod foo -l bar -d /home/bar -m test -d ${HOME}/home/bar || atf_fail "Directory not created" } atf_test_case user_mod_uid user_mod_uid_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -u 5000 } atf_test_case user_mod_w_error user_mod_w_error_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:1 -e match:"pw: Invalid value for default password" \ ${PW} usermod foo -w invalid_value } atf_test_case user_mod_w_no user_mod_w_no_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -w no atf_check -s exit:0 -o match:"^foo:\*" grep "^foo:" $HOME/master.passwd } atf_test_case user_mod_w_none user_mod_w_none_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -w none atf_check -s exit:0 -o match:"^foo::" grep "^foo:" $HOME/master.passwd } atf_test_case user_mod_w_random user_mod_w_random_body() { populate_etc_skel atf_check -s exit:0 ${PW} useradd foo password=`${PW} usermod foo -w random | cat` passhash=`awk -F ':' '/^foo:/ {print $2}' $HOME/master.passwd` atf_check -s exit:0 -o inline:$passhash \ $(atf_get_srcdir)/crypt $passhash "$password" } atf_test_case user_mod_w_yes user_mod_w_yes_body() { populate_etc_skel atf_check -s exit:0 
${PW} useradd foo atf_check -s exit:0 ${PW} usermod foo -w yes passhash=`awk -F ':' '/^foo:/ {print $2}' $HOME/master.passwd` atf_check -s exit:0 -o inline:$passhash \ $(atf_get_srcdir)/crypt $passhash "foo" } +atf_test_case user_mod_m +user_mod_m_body() { + populate_root_etc_skel + mkdir -p ${HOME}/home + mkdir -p ${HOME}/skel + echo "entry" > ${HOME}/skel/.file + atf_check -s exit:0 ${RPW} useradd foo + ! test -d ${HOME}/home/foo || atf_fail "Directory should not have been created" + atf_check -s exit:0 ${RPW} usermod foo -m -k /skel + test -d ${HOME}/home/foo || atf_fail "Directory should have been created" + test -f ${HOME}/home/foo/.file || atf_fail "Skel files not added" + echo "entry" > ${HOME}/skel/.file2 + atf_check -s exit:0 ${RPW} usermod foo -m -k /skel + test -f ${HOME}/home/foo/.file2 || atf_fail "Skel files not added" + echo > ${HOME}/home/foo/.file2 + atf_check -s exit:0 ${RPW} usermod foo -m -k /skel + atf_check -s exit:0 -o inline:"\n" cat ${HOME}/home/foo/.file2 +} + + atf_init_test_cases() { atf_add_test_case user_mod atf_add_test_case user_mod_noupdate atf_add_test_case user_mod_comments atf_add_test_case user_mod_comments_noupdate atf_add_test_case user_mod_comments_invalid atf_add_test_case user_mod_comments_invalid_noupdate atf_add_test_case user_mod_nogroups atf_add_test_case user_mod_rename atf_add_test_case user_mod_name_noupdate atf_add_test_case user_mod_rename_too_long atf_add_test_case user_mod_rename_multigroups atf_add_test_case user_mod_h atf_add_test_case user_mod_H atf_add_test_case user_mod_renamehome atf_add_test_case user_mod_uid atf_add_test_case user_mod_w_error atf_add_test_case user_mod_w_no atf_add_test_case user_mod_w_none atf_add_test_case user_mod_w_random atf_add_test_case user_mod_w_yes + atf_add_test_case user_mod_m } Index: projects/clang400-import/usr.sbin/wpa/wpa_cli/wpa_cli.8 =================================================================== --- projects/clang400-import/usr.sbin/wpa/wpa_cli/wpa_cli.8 (revision 312719) +++ projects/clang400-import/usr.sbin/wpa/wpa_cli/wpa_cli.8 (revision 312720) @@ -1,222 +1,336 @@ .\" Copyright (c) 2005 Sam Leffler .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE.
.\" .\" $FreeBSD$ .\" -.Dd June 16, 2005 +.Dd January 24, 2017 .Dt WPA_CLI 8 .Os .Sh NAME .Nm wpa_cli .Nd "text-based frontend program for interacting with wpa_supplicant" .Sh SYNOPSIS -.Nm -.Op Ar commands +.Nm wpa_cli +.Op Fl p Ar path_to_ctrl_sockets +.Op Fl i Ar ifname +.Op Fl hvB +.Op Fl a Ar action_file +.Op Fl P Ar pid_file +.Op Fl g Ar global_ctrl +.Op Fl G Ar ping_interval +.Ar command ... .Sh DESCRIPTION The .Nm utility is a text-based frontend program for interacting with .Xr wpa_supplicant 8 . It is used to query current status, change configuration, trigger events, and request interactive user input. .Pp The .Nm utility can show the current authentication status, selected security mode, dot11 and dot1x MIBs, etc. In addition, .Nm can configure EAPOL state machine parameters and trigger events such as reassociation and IEEE 802.1X logoff/logon. .Pp The .Nm utility provides an interface to supply authentication information such as username and password when it is not provided in the .Xr wpa_supplicant.conf 5 configuration file. This can be used, for example, to implement one-time passwords or generic token card authentication where the authentication is based on a challenge-response that uses an external device for generating the response. .Pp The .Nm utility supports two modes: interactive and command line. Both modes share the same command set and the main difference is in interactive mode providing access to unsolicited messages (event messages, username/password requests). .Pp Interactive mode is started when .Nm is executed without any parameters on the command line. Commands are then entered from the controlling terminal in response to the .Nm prompt. In command line mode, the same commands are entered as command line arguments. .Pp The control interface of .Xr wpa_supplicant 8 can be configured to allow non-root user access by using the .Va ctrl_interface_group parameter in the .Xr wpa_supplicant.conf 5 configuration file. This makes it possible to run .Nm with a normal user account. .Sh AUTHENTICATION PARAMETERS When .Xr wpa_supplicant 8 needs authentication parameters, such as username and password, that are not present in the configuration file, it sends a request message to all attached frontend programs, e.g., .Nm in interactive mode. The .Nm utility shows these requests with a .Dq Li CTRL-REQ- Ns Ao Ar type Ac Ns Li - Ns Ao Ar id Ac Ns : Ns Aq Ar text prefix, where .Aq Ar type is .Li IDENTITY , PASSWORD , or .Li OTP (One-Time Password), .Aq Ar id is a unique identifier for the current network, .Aq Ar text is a description of the request. In the case of an .Li OTP (One-Time Password) request, it includes the challenge from the authentication server. .Pp A user must supply .Xr wpa_supplicant 8 the needed parameters in response to these requests. .Pp For example, .Bd -literal -offset indent CTRL-REQ-PASSWORD-1:Password needed for SSID foobar > password 1 mysecretpassword Example request for generic token card challenge-response: CTRL-REQ-OTP-2:Challenge 1235663 needed for SSID foobar > otp 2 9876 .Ed +.Sh OPTIONS +These options are available: +.Bl -tag -width indent +.It Fl p Ar path +Control sockets path. +This should match the +.Ic ctrl_interface +in +.Xr wpa_supplicant.conf 5 . +The default path is +.Pa /var/run/wpa_supplicant . +.It Fl i Ar ifname +Interface to be configured. +By default, the first interface found in the socket path is used. +.It Fl h +Show help. +.It Fl v +Show version information. +.It Fl B +Run the daemon in the background. 
+.It Fl a Ar action_file +Run in daemon mode, executing the action file based on events from +.Xr wpa_supplicant 8 . +.It Fl P Ar pid_file +PID file location. +.It Fl g Ar global_ctrl +Use a global control interface to +.Xr wpa_supplicant 8 +rather than the default Unix domain sockets. +.It Fl G Ar ping_interval +Wait +.Dq ping_interval +seconds before sending each ping to +.Xr wpa_supplicant 8 . +See the +.Ic ping +command. +.It Ar command +See available commands in the next section. +.El .Sh COMMANDS -The following commands may be supplied on the command line +These commands can be supplied on the command line or at a prompt when operating interactively. .Bl -tag -width indent .It Ic status Report the current WPA/EAPOL/EAP status for the current interface. +.It Ic ifname +Show the current interface name. +The default interface is the first interface found in the socket path. +.It Ic ping +Ping the +.Xr wpa_supplicant 8 +utility. +This command can be used to test the status of the +.Xr wpa_supplicant 8 +daemon. .It Ic mib Report MIB variables (dot1x, dot11) for the current interface. .It Ic help Show usage help. .It Ic interface Op Ar ifname Show available interfaces and/or set the current interface -when multiple are available. +when multiple interfaces are available. .It Ic level Ar debug_level Change the debugging level in .Xr wpa_supplicant 8 . Larger numbers generate more messages. .It Ic license -Display the full -license for +Display the full license for .Nm . .It Ic logoff Send the IEEE 802.1X EAPOL state machine into the .Dq logoff state. .It Ic logon Send the IEEE 802.1X EAPOL state machine into the .Dq logon state. .It Ic set Op Ar settings Set variables. When no arguments are supplied, the known variables and their settings are displayed. .It Ic pmksa Show the contents of the PMKSA cache. .It Ic reassociate Force a reassociation to the current access point. .It Ic reconfigure Force .Xr wpa_supplicant 8 to re-read its configuration file. .It Ic preauthenticate Ar BSSID Force preauthentication of the specified .Ar BSSID . .It Ic identity Ar network_id identity Configure an identity for an SSID. .It Ic password Ar network_id password Configure a password for an SSID. +.It Ic new_password Ar network_id password +Change the password for an SSID. +.It Ic PIN Ar network_id pin +Configure a PIN for an SSID. +.It Ic passphrase Ar network_id passphrase +Configure a private key passphrase for an SSID. +.It Ic bssid Ar network_id bssid +Set a preferred BSSID for an SSID. +.It Ic blacklist Op Ar bssid | clear +Add a BSSID to the blacklist. +When invoked without any extra arguments, display the blacklist. +Specifying +.Ar clear +causes +.Nm +to clear the blacklist. +.It Ic list_networks +List configured networks. +.It Ic select_network Ar network_id +Select a network and disable others. +.It Ic enable_network Ar network_id +Enable a network. +.It Ic disable_network Ar network_id +Disable a network. +.It Ic add_network +Add a network. +.It Ic remove_network Ar network_id +Remove a network. +.It Ic set_network Op Ar network_id variable value +Set network variables. +Shows a list of variables when run without arguments. +.It Ic get_network Ar network_id variable +Get network variables. +.It Ic disconnect +Disconnect and wait for reassociate/reconnect command before connecting. +.It Ic reconnect +Similar to +.Ic reassociate , +but only takes effect if already disconnected. +.It Ic scan +Request a new BSS scan. +.It Ic scan_results +Get the latest BSS scan results.
+This command can be invoked after running a BSS scan with +.Ic scan . +.It Ic bss Op Ar idx | bssid +Get a detailed BSS scan result for the network identified by +.Dq bssid +or +.Dq idx . .It Ic otp Ar network_id password Configure a one-time password for an SSID. .It Ic terminate Force .Xr wpa_supplicant 8 to terminate. +.It Ic interface_add Ar ifname Op Ar confname driver ctrl_interface driver_param bridge_name +Add a new interface with the given parameters. +.It Ic interface_remove Ar ifname +Remove the interface. +.It Ic interface_list +List available interfaces. .It Ic quit Exit .Nm . .El .Sh SEE ALSO .Xr wpa_supplicant.conf 5 , .Xr wpa_supplicant 8 .Sh HISTORY The .Nm utility first appeared in .Fx 6.0 . .Sh AUTHORS The .Nm utility was written by .An Jouni Malinen Aq Mt j@w1.fi . This manual page is derived from the .Pa README -file included in the +and +.Pa wpa_cli.c +files included in the .Nm wpa_supplicant distribution. Index: projects/clang400-import =================================================================== --- projects/clang400-import (revision 312719) +++ projects/clang400-import (revision 312720) Property changes on: projects/clang400-import ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r312624-312719